Diffstat (limited to 'drivers/net/ethernet')
451 files changed, 25923 insertions, 7700 deletions
diff --git a/drivers/net/ethernet/airoha/Kconfig b/drivers/net/ethernet/airoha/Kconfig index 1a4cf6a259f6..ad3ce501e7a5 100644 --- a/drivers/net/ethernet/airoha/Kconfig +++ b/drivers/net/ethernet/airoha/Kconfig @@ -24,4 +24,11 @@ config NET_AIROHA This driver supports the gigabit ethernet MACs in the Airoha SoC family. +config NET_AIROHA_FLOW_STATS + default y + bool "Airoha flow stats" + depends on NET_AIROHA && NET_AIROHA_NPU + help + Enable Aiorha flowtable statistic counters. + endif #NET_VENDOR_AIROHA diff --git a/drivers/net/ethernet/airoha/airoha_eth.c b/drivers/net/ethernet/airoha/airoha_eth.c index 1e9ab65218ff..d1d3b854361e 100644 --- a/drivers/net/ethernet/airoha/airoha_eth.c +++ b/drivers/net/ethernet/airoha/airoha_eth.c @@ -5,6 +5,7 @@ */ #include <linux/of.h> #include <linux/of_net.h> +#include <linux/of_reserved_mem.h> #include <linux/platform_device.h> #include <linux/tcp.h> #include <linux/u64_stats_sync.h> @@ -34,46 +35,40 @@ u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val) return val; } -static void airoha_qdma_set_irqmask(struct airoha_qdma *qdma, int index, - u32 clear, u32 set) +static void airoha_qdma_set_irqmask(struct airoha_irq_bank *irq_bank, + int index, u32 clear, u32 set) { + struct airoha_qdma *qdma = irq_bank->qdma; + int bank = irq_bank - &qdma->irq_banks[0]; unsigned long flags; - if (WARN_ON_ONCE(index >= ARRAY_SIZE(qdma->irqmask))) + if (WARN_ON_ONCE(index >= ARRAY_SIZE(irq_bank->irqmask))) return; - spin_lock_irqsave(&qdma->irq_lock, flags); + spin_lock_irqsave(&irq_bank->irq_lock, flags); - qdma->irqmask[index] &= ~clear; - qdma->irqmask[index] |= set; - airoha_qdma_wr(qdma, REG_INT_ENABLE(index), qdma->irqmask[index]); + irq_bank->irqmask[index] &= ~clear; + irq_bank->irqmask[index] |= set; + airoha_qdma_wr(qdma, REG_INT_ENABLE(bank, index), + irq_bank->irqmask[index]); /* Read irq_enable register in order to guarantee the update above * completes in the spinlock critical section. */ - airoha_qdma_rr(qdma, REG_INT_ENABLE(index)); + airoha_qdma_rr(qdma, REG_INT_ENABLE(bank, index)); - spin_unlock_irqrestore(&qdma->irq_lock, flags); + spin_unlock_irqrestore(&irq_bank->irq_lock, flags); } -static void airoha_qdma_irq_enable(struct airoha_qdma *qdma, int index, - u32 mask) +static void airoha_qdma_irq_enable(struct airoha_irq_bank *irq_bank, + int index, u32 mask) { - airoha_qdma_set_irqmask(qdma, index, 0, mask); + airoha_qdma_set_irqmask(irq_bank, index, 0, mask); } -static void airoha_qdma_irq_disable(struct airoha_qdma *qdma, int index, - u32 mask) +static void airoha_qdma_irq_disable(struct airoha_irq_bank *irq_bank, + int index, u32 mask) { - airoha_qdma_set_irqmask(qdma, index, mask, 0); -} - -static bool airhoa_is_lan_gdm_port(struct airoha_gdm_port *port) -{ - /* GDM1 port on EN7581 SoC is connected to the lan dsa switch. - * GDM{2,3,4} can be used as wan port connected to an external - * phy module. 
- */ - return port->id == 1; + airoha_qdma_set_irqmask(irq_bank, index, mask, 0); } static void airoha_set_macaddr(struct airoha_gdm_port *port, const u8 *addr) @@ -527,6 +522,25 @@ static int airoha_fe_init(struct airoha_eth *eth) /* disable IFC by default */ airoha_fe_clear(eth, REG_FE_CSR_IFC_CFG, FE_IFC_EN_MASK); + airoha_fe_wr(eth, REG_PPE_DFT_CPORT0(0), + FIELD_PREP(DFT_CPORT_MASK(7), FE_PSE_PORT_CDM1) | + FIELD_PREP(DFT_CPORT_MASK(6), FE_PSE_PORT_CDM1) | + FIELD_PREP(DFT_CPORT_MASK(5), FE_PSE_PORT_CDM1) | + FIELD_PREP(DFT_CPORT_MASK(4), FE_PSE_PORT_CDM1) | + FIELD_PREP(DFT_CPORT_MASK(3), FE_PSE_PORT_CDM1) | + FIELD_PREP(DFT_CPORT_MASK(2), FE_PSE_PORT_CDM1) | + FIELD_PREP(DFT_CPORT_MASK(1), FE_PSE_PORT_CDM1) | + FIELD_PREP(DFT_CPORT_MASK(0), FE_PSE_PORT_CDM1)); + airoha_fe_wr(eth, REG_PPE_DFT_CPORT0(1), + FIELD_PREP(DFT_CPORT_MASK(7), FE_PSE_PORT_CDM2) | + FIELD_PREP(DFT_CPORT_MASK(6), FE_PSE_PORT_CDM2) | + FIELD_PREP(DFT_CPORT_MASK(5), FE_PSE_PORT_CDM2) | + FIELD_PREP(DFT_CPORT_MASK(4), FE_PSE_PORT_CDM2) | + FIELD_PREP(DFT_CPORT_MASK(3), FE_PSE_PORT_CDM2) | + FIELD_PREP(DFT_CPORT_MASK(2), FE_PSE_PORT_CDM2) | + FIELD_PREP(DFT_CPORT_MASK(1), FE_PSE_PORT_CDM2) | + FIELD_PREP(DFT_CPORT_MASK(0), FE_PSE_PORT_CDM2)); + /* enable 1:N vlan action, init vlan table */ airoha_fe_set(eth, REG_MC_VLAN_EN, MC_VLAN_EN_MASK); @@ -687,7 +701,7 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget) reason = FIELD_GET(AIROHA_RXD4_PPE_CPU_REASON, msg1); if (reason == PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED) - airoha_ppe_check_skb(eth->ppe, hash); + airoha_ppe_check_skb(eth->ppe, q->skb, hash); done++; napi_gro_receive(&q->napi, q->skb); @@ -716,9 +730,20 @@ static int airoha_qdma_rx_napi_poll(struct napi_struct *napi, int budget) done += cur; } while (cur && done < budget); - if (done < budget && napi_complete(napi)) - airoha_qdma_irq_enable(q->qdma, QDMA_INT_REG_IDX1, - RX_DONE_INT_MASK); + if (done < budget && napi_complete(napi)) { + struct airoha_qdma *qdma = q->qdma; + int i, qid = q - &qdma->q_rx[0]; + int intr_reg = qid < RX_DONE_HIGH_OFFSET ? 
QDMA_INT_REG_IDX1 + : QDMA_INT_REG_IDX2; + + for (i = 0; i < ARRAY_SIZE(qdma->irq_banks); i++) { + if (!(BIT(qid) & RX_IRQ_BANK_PIN_MASK(i))) + continue; + + airoha_qdma_irq_enable(&qdma->irq_banks[i], intr_reg, + BIT(qid % RX_DONE_HIGH_OFFSET)); + } + } return done; } @@ -921,7 +946,7 @@ unlock: } if (done < budget && napi_complete(napi)) - airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX0, + airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX0, TX_DONE_INT_MASK(id)); return done; @@ -1039,24 +1064,46 @@ static void airoha_qdma_cleanup_tx_queue(struct airoha_queue *q) static int airoha_qdma_init_hfwd_queues(struct airoha_qdma *qdma) { struct airoha_eth *eth = qdma->eth; + int id = qdma - ð->qdma[0]; dma_addr_t dma_addr; + const char *name; + int size, index; u32 status; - int size; size = HW_DSCP_NUM * sizeof(struct airoha_qdma_fwd_desc); - qdma->hfwd.desc = dmam_alloc_coherent(eth->dev, size, &dma_addr, - GFP_KERNEL); - if (!qdma->hfwd.desc) + if (!dmam_alloc_coherent(eth->dev, size, &dma_addr, GFP_KERNEL)) return -ENOMEM; airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr); - size = AIROHA_MAX_PACKET_SIZE * HW_DSCP_NUM; - qdma->hfwd.q = dmam_alloc_coherent(eth->dev, size, &dma_addr, - GFP_KERNEL); - if (!qdma->hfwd.q) + name = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d-buf", id); + if (!name) return -ENOMEM; + index = of_property_match_string(eth->dev->of_node, + "memory-region-names", name); + if (index >= 0) { + struct reserved_mem *rmem; + struct device_node *np; + + /* Consume reserved memory for hw forwarding buffers queue if + * available in the DTS + */ + np = of_parse_phandle(eth->dev->of_node, "memory-region", + index); + if (!np) + return -ENODEV; + + rmem = of_reserved_mem_lookup(np); + of_node_put(np); + dma_addr = rmem->base; + } else { + size = AIROHA_MAX_PACKET_SIZE * HW_DSCP_NUM; + if (!dmam_alloc_coherent(eth->dev, size, &dma_addr, + GFP_KERNEL)) + return -ENOMEM; + } + airoha_qdma_wr(qdma, REG_FWD_BUF_BASE, dma_addr); airoha_qdma_rmw(qdma, REG_HW_FWD_DSCP_CFG, @@ -1068,7 +1115,7 @@ static int airoha_qdma_init_hfwd_queues(struct airoha_qdma *qdma) LMGR_INIT_START | LMGR_SRAM_MODE_MASK | HW_FWD_DESC_NUM_MASK, FIELD_PREP(HW_FWD_DESC_NUM_MASK, HW_DSCP_NUM) | - LMGR_INIT_START); + LMGR_INIT_START | LMGR_SRAM_MODE_MASK); return read_poll_timeout(airoha_qdma_rr, status, !(status & LMGR_INIT_START), USEC_PER_MSEC, @@ -1151,14 +1198,24 @@ static int airoha_qdma_hw_init(struct airoha_qdma *qdma) { int i; - /* clear pending irqs */ - for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++) + for (i = 0; i < ARRAY_SIZE(qdma->irq_banks); i++) { + /* clear pending irqs */ airoha_qdma_wr(qdma, REG_INT_STATUS(i), 0xffffffff); - - /* setup irqs */ - airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX0, INT_IDX0_MASK); - airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX1, INT_IDX1_MASK); - airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX4, INT_IDX4_MASK); + /* setup rx irqs */ + airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX0, + INT_RX0_MASK(RX_IRQ_BANK_PIN_MASK(i))); + airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX1, + INT_RX1_MASK(RX_IRQ_BANK_PIN_MASK(i))); + airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX2, + INT_RX2_MASK(RX_IRQ_BANK_PIN_MASK(i))); + airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX3, + INT_RX3_MASK(RX_IRQ_BANK_PIN_MASK(i))); + } + /* setup tx irqs */ + airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX0, + TX_COHERENT_LOW_INT_MASK | INT_TX_MASK); + airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX4, + 
TX_COHERENT_HIGH_INT_MASK); /* setup irq binding */ for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) { @@ -1203,30 +1260,39 @@ static int airoha_qdma_hw_init(struct airoha_qdma *qdma) static irqreturn_t airoha_irq_handler(int irq, void *dev_instance) { - struct airoha_qdma *qdma = dev_instance; - u32 intr[ARRAY_SIZE(qdma->irqmask)]; + struct airoha_irq_bank *irq_bank = dev_instance; + struct airoha_qdma *qdma = irq_bank->qdma; + u32 rx_intr_mask = 0, rx_intr1, rx_intr2; + u32 intr[ARRAY_SIZE(irq_bank->irqmask)]; int i; - for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++) { + for (i = 0; i < ARRAY_SIZE(intr); i++) { intr[i] = airoha_qdma_rr(qdma, REG_INT_STATUS(i)); - intr[i] &= qdma->irqmask[i]; + intr[i] &= irq_bank->irqmask[i]; airoha_qdma_wr(qdma, REG_INT_STATUS(i), intr[i]); } if (!test_bit(DEV_STATE_INITIALIZED, &qdma->eth->state)) return IRQ_NONE; - if (intr[1] & RX_DONE_INT_MASK) { - airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX1, - RX_DONE_INT_MASK); + rx_intr1 = intr[1] & RX_DONE_LOW_INT_MASK; + if (rx_intr1) { + airoha_qdma_irq_disable(irq_bank, QDMA_INT_REG_IDX1, rx_intr1); + rx_intr_mask |= rx_intr1; + } + + rx_intr2 = intr[2] & RX_DONE_HIGH_INT_MASK; + if (rx_intr2) { + airoha_qdma_irq_disable(irq_bank, QDMA_INT_REG_IDX2, rx_intr2); + rx_intr_mask |= (rx_intr2 << 16); + } - for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) { - if (!qdma->q_rx[i].ndesc) - continue; + for (i = 0; rx_intr_mask && i < ARRAY_SIZE(qdma->q_rx); i++) { + if (!qdma->q_rx[i].ndesc) + continue; - if (intr[1] & BIT(i)) - napi_schedule(&qdma->q_rx[i].napi); - } + if (rx_intr_mask & BIT(i)) + napi_schedule(&qdma->q_rx[i].napi); } if (intr[0] & INT_TX_MASK) { @@ -1234,7 +1300,7 @@ static irqreturn_t airoha_irq_handler(int irq, void *dev_instance) if (!(intr[0] & TX_DONE_INT_MASK(i))) continue; - airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX0, + airoha_qdma_irq_disable(irq_bank, QDMA_INT_REG_IDX0, TX_DONE_INT_MASK(i)); napi_schedule(&qdma->q_tx_irq[i].napi); } @@ -1243,6 +1309,39 @@ static irqreturn_t airoha_irq_handler(int irq, void *dev_instance) return IRQ_HANDLED; } +static int airoha_qdma_init_irq_banks(struct platform_device *pdev, + struct airoha_qdma *qdma) +{ + struct airoha_eth *eth = qdma->eth; + int i, id = qdma - ð->qdma[0]; + + for (i = 0; i < ARRAY_SIZE(qdma->irq_banks); i++) { + struct airoha_irq_bank *irq_bank = &qdma->irq_banks[i]; + int err, irq_index = 4 * id + i; + const char *name; + + spin_lock_init(&irq_bank->irq_lock); + irq_bank->qdma = qdma; + + irq_bank->irq = platform_get_irq(pdev, irq_index); + if (irq_bank->irq < 0) + return irq_bank->irq; + + name = devm_kasprintf(eth->dev, GFP_KERNEL, + KBUILD_MODNAME ".%d", irq_index); + if (!name) + return -ENOMEM; + + err = devm_request_irq(eth->dev, irq_bank->irq, + airoha_irq_handler, IRQF_SHARED, name, + irq_bank); + if (err) + return err; + } + + return 0; +} + static int airoha_qdma_init(struct platform_device *pdev, struct airoha_eth *eth, struct airoha_qdma *qdma) @@ -1250,9 +1349,7 @@ static int airoha_qdma_init(struct platform_device *pdev, int err, id = qdma - ð->qdma[0]; const char *res; - spin_lock_init(&qdma->irq_lock); qdma->eth = eth; - res = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d", id); if (!res) return -ENOMEM; @@ -1262,12 +1359,7 @@ static int airoha_qdma_init(struct platform_device *pdev, return dev_err_probe(eth->dev, PTR_ERR(qdma->regs), "failed to iomap qdma%d regs\n", id); - qdma->irq = platform_get_irq(pdev, 4 * id); - if (qdma->irq < 0) - return qdma->irq; - - err = devm_request_irq(eth->dev, qdma->irq, 
airoha_irq_handler, - IRQF_SHARED, KBUILD_MODNAME, qdma); + err = airoha_qdma_init_irq_banks(pdev, qdma); if (err) return err; @@ -1627,7 +1719,6 @@ static void airhoha_set_gdm2_loopback(struct airoha_gdm_port *port) if (port->id == 3) { /* FIXME: handle XSI_PCE1_PORT */ - airoha_fe_wr(eth, REG_PPE_DFT_CPORT0(0), 0x5500); airoha_fe_rmw(eth, REG_FE_WAN_PORT, WAN1_EN_MASK | WAN1_MASK | WAN0_MASK, FIELD_PREP(WAN0_MASK, HSGMII_LAN_PCIE0_SRCPORT)); @@ -2105,6 +2196,125 @@ static int airoha_tc_setup_qdisc_ets(struct airoha_gdm_port *port, } } +static int airoha_qdma_get_rl_param(struct airoha_qdma *qdma, int queue_id, + u32 addr, enum trtcm_param_type param, + u32 *val_low, u32 *val_high) +{ + u32 idx = QDMA_METER_IDX(queue_id), group = QDMA_METER_GROUP(queue_id); + u32 val, config = FIELD_PREP(RATE_LIMIT_PARAM_TYPE_MASK, param) | + FIELD_PREP(RATE_LIMIT_METER_GROUP_MASK, group) | + FIELD_PREP(RATE_LIMIT_PARAM_INDEX_MASK, idx); + + airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config); + if (read_poll_timeout(airoha_qdma_rr, val, + val & RATE_LIMIT_PARAM_RW_DONE_MASK, + USEC_PER_MSEC, 10 * USEC_PER_MSEC, true, qdma, + REG_TRTCM_CFG_PARAM(addr))) + return -ETIMEDOUT; + + *val_low = airoha_qdma_rr(qdma, REG_TRTCM_DATA_LOW(addr)); + if (val_high) + *val_high = airoha_qdma_rr(qdma, REG_TRTCM_DATA_HIGH(addr)); + + return 0; +} + +static int airoha_qdma_set_rl_param(struct airoha_qdma *qdma, int queue_id, + u32 addr, enum trtcm_param_type param, + u32 val) +{ + u32 idx = QDMA_METER_IDX(queue_id), group = QDMA_METER_GROUP(queue_id); + u32 config = RATE_LIMIT_PARAM_RW_MASK | + FIELD_PREP(RATE_LIMIT_PARAM_TYPE_MASK, param) | + FIELD_PREP(RATE_LIMIT_METER_GROUP_MASK, group) | + FIELD_PREP(RATE_LIMIT_PARAM_INDEX_MASK, idx); + + airoha_qdma_wr(qdma, REG_TRTCM_DATA_LOW(addr), val); + airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config); + + return read_poll_timeout(airoha_qdma_rr, val, + val & RATE_LIMIT_PARAM_RW_DONE_MASK, + USEC_PER_MSEC, 10 * USEC_PER_MSEC, true, + qdma, REG_TRTCM_CFG_PARAM(addr)); +} + +static int airoha_qdma_set_rl_config(struct airoha_qdma *qdma, int queue_id, + u32 addr, bool enable, u32 enable_mask) +{ + u32 val; + int err; + + err = airoha_qdma_get_rl_param(qdma, queue_id, addr, TRTCM_MISC_MODE, + &val, NULL); + if (err) + return err; + + val = enable ? val | enable_mask : val & ~enable_mask; + + return airoha_qdma_set_rl_param(qdma, queue_id, addr, TRTCM_MISC_MODE, + val); +} + +static int airoha_qdma_set_rl_token_bucket(struct airoha_qdma *qdma, + int queue_id, u32 rate_val, + u32 bucket_size) +{ + u32 val, config, tick, unit, rate, rate_frac; + int err; + + err = airoha_qdma_get_rl_param(qdma, queue_id, REG_INGRESS_TRTCM_CFG, + TRTCM_MISC_MODE, &config, NULL); + if (err) + return err; + + val = airoha_qdma_rr(qdma, REG_INGRESS_TRTCM_CFG); + tick = FIELD_GET(INGRESS_FAST_TICK_MASK, val); + if (config & TRTCM_TICK_SEL) + tick *= FIELD_GET(INGRESS_SLOW_TICK_RATIO_MASK, val); + if (!tick) + return -EINVAL; + + unit = (config & TRTCM_PKT_MODE) ? 
1000000 / tick : 8000 / tick; + if (!unit) + return -EINVAL; + + rate = rate_val / unit; + rate_frac = rate_val % unit; + rate_frac = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate_frac) / unit; + rate = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate) | + FIELD_PREP(TRTCM_TOKEN_RATE_FRACTION_MASK, rate_frac); + + err = airoha_qdma_set_rl_param(qdma, queue_id, REG_INGRESS_TRTCM_CFG, + TRTCM_TOKEN_RATE_MODE, rate); + if (err) + return err; + + val = bucket_size; + if (!(config & TRTCM_PKT_MODE)) + val = max_t(u32, val, MIN_TOKEN_SIZE); + val = min_t(u32, __fls(val), MAX_TOKEN_SIZE_OFFSET); + + return airoha_qdma_set_rl_param(qdma, queue_id, REG_INGRESS_TRTCM_CFG, + TRTCM_BUCKETSIZE_SHIFT_MODE, val); +} + +static int airoha_qdma_init_rl_config(struct airoha_qdma *qdma, int queue_id, + bool enable, enum trtcm_unit_type unit) +{ + bool tick_sel = queue_id == 0 || queue_id == 2 || queue_id == 8; + enum trtcm_param mode = TRTCM_METER_MODE; + int err; + + mode |= unit == TRTCM_PACKET_UNIT ? TRTCM_PKT_MODE : 0; + err = airoha_qdma_set_rl_config(qdma, queue_id, REG_INGRESS_TRTCM_CFG, + enable, mode); + if (err) + return err; + + return airoha_qdma_set_rl_config(qdma, queue_id, REG_INGRESS_TRTCM_CFG, + tick_sel, TRTCM_TICK_SEL); +} + static int airoha_qdma_get_trtcm_param(struct airoha_qdma *qdma, int channel, u32 addr, enum trtcm_param_type param, enum trtcm_mode_type mode, @@ -2269,10 +2479,142 @@ static int airoha_tc_htb_alloc_leaf_queue(struct airoha_gdm_port *port, return 0; } +static int airoha_qdma_set_rx_meter(struct airoha_gdm_port *port, + u32 rate, u32 bucket_size, + enum trtcm_unit_type unit_type) +{ + struct airoha_qdma *qdma = port->qdma; + int i; + + for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) { + int err; + + if (!qdma->q_rx[i].ndesc) + continue; + + err = airoha_qdma_init_rl_config(qdma, i, !!rate, unit_type); + if (err) + return err; + + err = airoha_qdma_set_rl_token_bucket(qdma, i, rate, + bucket_size); + if (err) + return err; + } + + return 0; +} + +static int airoha_tc_matchall_act_validate(struct tc_cls_matchall_offload *f) +{ + const struct flow_action *actions = &f->rule->action; + const struct flow_action_entry *act; + + if (!flow_action_has_entries(actions)) { + NL_SET_ERR_MSG_MOD(f->common.extack, + "filter run with no actions"); + return -EINVAL; + } + + if (!flow_offload_has_one_action(actions)) { + NL_SET_ERR_MSG_MOD(f->common.extack, + "only once action per filter is supported"); + return -EOPNOTSUPP; + } + + act = &actions->entries[0]; + if (act->id != FLOW_ACTION_POLICE) { + NL_SET_ERR_MSG_MOD(f->common.extack, "unsupported action"); + return -EOPNOTSUPP; + } + + if (act->police.exceed.act_id != FLOW_ACTION_DROP) { + NL_SET_ERR_MSG_MOD(f->common.extack, + "invalid exceed action id"); + return -EOPNOTSUPP; + } + + if (act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) { + NL_SET_ERR_MSG_MOD(f->common.extack, + "invalid notexceed action id"); + return -EOPNOTSUPP; + } + + if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT && + !flow_action_is_last_entry(actions, act)) { + NL_SET_ERR_MSG_MOD(f->common.extack, + "action accept must be last"); + return -EOPNOTSUPP; + } + + if (act->police.peakrate_bytes_ps || act->police.avrate || + act->police.overhead || act->police.mtu) { + NL_SET_ERR_MSG_MOD(f->common.extack, + "peakrate/avrate/overhead/mtu unsupported"); + return -EOPNOTSUPP; + } + + return 0; +} + +static int airoha_dev_tc_matchall(struct net_device *dev, + struct tc_cls_matchall_offload *f) +{ + enum trtcm_unit_type unit_type = TRTCM_BYTE_UNIT; + struct airoha_gdm_port 
*port = netdev_priv(dev); + u32 rate = 0, bucket_size = 0; + + switch (f->command) { + case TC_CLSMATCHALL_REPLACE: { + const struct flow_action_entry *act; + int err; + + err = airoha_tc_matchall_act_validate(f); + if (err) + return err; + + act = &f->rule->action.entries[0]; + if (act->police.rate_pkt_ps) { + rate = act->police.rate_pkt_ps; + bucket_size = act->police.burst_pkt; + unit_type = TRTCM_PACKET_UNIT; + } else { + rate = div_u64(act->police.rate_bytes_ps, 1000); + rate = rate << 3; /* Kbps */ + bucket_size = act->police.burst; + } + fallthrough; + } + case TC_CLSMATCHALL_DESTROY: + return airoha_qdma_set_rx_meter(port, rate, bucket_size, + unit_type); + default: + return -EOPNOTSUPP; + } +} + +static int airoha_dev_setup_tc_block_cb(enum tc_setup_type type, + void *type_data, void *cb_priv) +{ + struct net_device *dev = cb_priv; + + if (!tc_can_offload(dev)) + return -EOPNOTSUPP; + + switch (type) { + case TC_SETUP_CLSFLOWER: + return airoha_ppe_setup_tc_block_cb(dev, type_data); + case TC_SETUP_CLSMATCHALL: + return airoha_dev_tc_matchall(dev, type_data); + default: + return -EOPNOTSUPP; + } +} + static int airoha_dev_setup_tc_block(struct airoha_gdm_port *port, struct flow_block_offload *f) { - flow_setup_cb_t *cb = airoha_ppe_setup_tc_block_cb; + flow_setup_cb_t *cb = airoha_dev_setup_tc_block_cb; static LIST_HEAD(block_cb_list); struct flow_block_cb *block_cb; @@ -2511,7 +2853,7 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth, dev->features |= dev->hw_features; dev->vlan_features = dev->hw_features; dev->dev.of_node = np; - dev->irq = qdma->irq; + dev->irq = qdma->irq_banks[0].irq; SET_NETDEV_DEV(dev, eth->dev); /* reserve hw queues for HTB offloading */ @@ -2541,7 +2883,15 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth, if (err) return err; - return register_netdev(dev); + err = register_netdev(dev); + if (err) + goto free_metadata_dst; + + return 0; + +free_metadata_dst: + airoha_metadata_dst_free(port); + return err; } static int airoha_probe(struct platform_device *pdev) diff --git a/drivers/net/ethernet/airoha/airoha_eth.h b/drivers/net/ethernet/airoha/airoha_eth.h index ec8908f904c6..b815697302bf 100644 --- a/drivers/net/ethernet/airoha/airoha_eth.h +++ b/drivers/net/ethernet/airoha/airoha_eth.h @@ -17,6 +17,7 @@ #define AIROHA_MAX_NUM_GDM_PORTS 4 #define AIROHA_MAX_NUM_QDMA 2 +#define AIROHA_MAX_NUM_IRQ_BANKS 4 #define AIROHA_MAX_DSA_PORTS 7 #define AIROHA_MAX_NUM_RSTS 3 #define AIROHA_MAX_NUM_XSI_RSTS 5 @@ -49,6 +50,14 @@ #define PPE_NUM 2 #define PPE1_SRAM_NUM_ENTRIES (8 * 1024) #define PPE_SRAM_NUM_ENTRIES (2 * PPE1_SRAM_NUM_ENTRIES) +#ifdef CONFIG_NET_AIROHA_FLOW_STATS +#define PPE1_STATS_NUM_ENTRIES (4 * 1024) +#else +#define PPE1_STATS_NUM_ENTRIES 0 +#endif /* CONFIG_NET_AIROHA_FLOW_STATS */ +#define PPE_STATS_NUM_ENTRIES (2 * PPE1_STATS_NUM_ENTRIES) +#define PPE1_SRAM_NUM_DATA_ENTRIES (PPE1_SRAM_NUM_ENTRIES - PPE1_STATS_NUM_ENTRIES) +#define PPE_SRAM_NUM_DATA_ENTRIES (2 * PPE1_SRAM_NUM_DATA_ENTRIES) #define PPE_DRAM_NUM_ENTRIES (16 * 1024) #define PPE_NUM_ENTRIES (PPE_SRAM_NUM_ENTRIES + PPE_DRAM_NUM_ENTRIES) #define PPE_HASH_MASK (PPE_NUM_ENTRIES - 1) @@ -127,6 +136,11 @@ enum tx_sched_mode { TC_SCH_WRR2, }; +enum trtcm_unit_type { + TRTCM_BYTE_UNIT, + TRTCM_PACKET_UNIT, +}; + enum trtcm_param_type { TRTCM_MISC_MODE, /* meter_en, pps_mode, tick_sel */ TRTCM_TOKEN_RATE_MODE, @@ -255,6 +269,8 @@ struct airoha_foe_mac_info { u16 pppoe_id; u16 src_mac_lo; + + u32 meter; }; #define AIROHA_FOE_IB1_UNBIND_PREBIND BIT(24) @@ -290,6 +306,11 @@ 
struct airoha_foe_mac_info { #define AIROHA_FOE_TUNNEL BIT(6) #define AIROHA_FOE_TUNNEL_ID GENMASK(5, 0) +#define AIROHA_FOE_TUNNEL_MTU GENMASK(31, 16) +#define AIROHA_FOE_ACNT_GRP3 GENMASK(15, 9) +#define AIROHA_FOE_METER_GRP3 GENMASK(8, 5) +#define AIROHA_FOE_METER_GRP2 GENMASK(4, 0) + struct airoha_foe_bridge { u32 dest_mac_hi; @@ -373,6 +394,8 @@ struct airoha_foe_ipv6 { u32 ib2; struct airoha_foe_mac_info_common l2; + + u32 meter; }; struct airoha_foe_entry { @@ -391,6 +414,16 @@ struct airoha_foe_entry { }; }; +struct airoha_foe_stats { + u32 bytes; + u32 packets; +}; + +struct airoha_foe_stats64 { + u64 bytes; + u64 packets; +}; + struct airoha_flow_data { struct ethhdr eth; @@ -422,37 +455,64 @@ struct airoha_flow_data { } pppoe; }; +enum airoha_flow_entry_type { + FLOW_TYPE_L4, + FLOW_TYPE_L2, + FLOW_TYPE_L2_SUBFLOW, +}; + struct airoha_flow_table_entry { - struct hlist_node list; + union { + struct hlist_node list; /* PPE L3 flow entry */ + struct { + struct rhash_head l2_node; /* L2 flow entry */ + struct hlist_head l2_flows; /* PPE L2 subflows list */ + }; + }; struct airoha_foe_entry data; + struct hlist_node l2_subflow_node; /* PPE L2 subflow entry */ u32 hash; + struct airoha_foe_stats64 stats; + enum airoha_flow_entry_type type; + struct rhash_head node; unsigned long cookie; }; -struct airoha_qdma { - struct airoha_eth *eth; - void __iomem *regs; +/* RX queue to IRQ mapping: BIT(q) in IRQ(n) */ +#define RX_IRQ0_BANK_PIN_MASK 0x839f +#define RX_IRQ1_BANK_PIN_MASK 0x7fe00000 +#define RX_IRQ2_BANK_PIN_MASK 0x20 +#define RX_IRQ3_BANK_PIN_MASK 0x40 +#define RX_IRQ_BANK_PIN_MASK(_n) \ + (((_n) == 3) ? RX_IRQ3_BANK_PIN_MASK : \ + ((_n) == 2) ? RX_IRQ2_BANK_PIN_MASK : \ + ((_n) == 1) ? RX_IRQ1_BANK_PIN_MASK : \ + RX_IRQ0_BANK_PIN_MASK) + +struct airoha_irq_bank { + struct airoha_qdma *qdma; /* protect concurrent irqmask accesses */ spinlock_t irq_lock; u32 irqmask[QDMA_INT_REG_MAX]; int irq; +}; + +struct airoha_qdma { + struct airoha_eth *eth; + void __iomem *regs; atomic_t users; + struct airoha_irq_bank irq_banks[AIROHA_MAX_NUM_IRQ_BANKS]; + struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ]; struct airoha_queue q_tx[AIROHA_NUM_TX_RING]; struct airoha_queue q_rx[AIROHA_NUM_RX_RING]; - - /* descriptor and packet buffers for qdma hw forward */ - struct { - void *desc; - void *q; - } hfwd; }; struct airoha_gdm_port { @@ -480,9 +540,14 @@ struct airoha_ppe { void *foe; dma_addr_t foe_dma; + struct rhashtable l2_flows; + struct hlist_head *foe_flow; u16 foe_check_time[PPE_NUM_ENTRIES]; + struct airoha_foe_stats *foe_stats; + dma_addr_t foe_stats_dma; + struct dentry *debugfs_dir; }; @@ -532,16 +597,27 @@ u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val); #define airoha_qdma_clear(qdma, offset, val) \ airoha_rmw((qdma)->regs, (offset), (val), 0) +static inline bool airhoa_is_lan_gdm_port(struct airoha_gdm_port *port) +{ + /* GDM1 port on EN7581 SoC is connected to the lan dsa switch. + * GDM{2,3,4} can be used as wan port connected to an external + * phy module. 
+ */ + return port->id == 1; +} + bool airoha_is_valid_gdm_port(struct airoha_eth *eth, struct airoha_gdm_port *port); -void airoha_ppe_check_skb(struct airoha_ppe *ppe, u16 hash); -int airoha_ppe_setup_tc_block_cb(enum tc_setup_type type, void *type_data, - void *cb_priv); +void airoha_ppe_check_skb(struct airoha_ppe *ppe, struct sk_buff *skb, + u16 hash); +int airoha_ppe_setup_tc_block_cb(struct net_device *dev, void *type_data); int airoha_ppe_init(struct airoha_eth *eth); void airoha_ppe_deinit(struct airoha_eth *eth); struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe, u32 hash); +void airoha_ppe_foe_entry_get_stats(struct airoha_ppe *ppe, u32 hash, + struct airoha_foe_stats64 *stats); #ifdef CONFIG_DEBUG_FS int airoha_ppe_debugfs_init(struct airoha_ppe *ppe); diff --git a/drivers/net/ethernet/airoha/airoha_npu.c b/drivers/net/ethernet/airoha/airoha_npu.c index ead0625e781f..0e5b8c21b9aa 100644 --- a/drivers/net/ethernet/airoha/airoha_npu.c +++ b/drivers/net/ethernet/airoha/airoha_npu.c @@ -12,6 +12,7 @@ #include <linux/of_reserved_mem.h> #include <linux/regmap.h> +#include "airoha_eth.h" #include "airoha_npu.h" #define NPU_EN7581_FIRMWARE_DATA "airoha/en7581_npu_data.bin" @@ -72,6 +73,7 @@ enum { PPE_FUNC_SET_WAIT_HWNAT_INIT, PPE_FUNC_SET_WAIT_HWNAT_DEINIT, PPE_FUNC_SET_WAIT_API, + PPE_FUNC_SET_WAIT_FLOW_STATS_SETUP, }; enum { @@ -115,6 +117,10 @@ struct ppe_mbox_data { u32 size; u32 data; } set_info; + struct { + u32 npu_stats_addr; + u32 foe_stats_addr; + } stats_info; }; }; @@ -124,17 +130,12 @@ static int airoha_npu_send_msg(struct airoha_npu *npu, int func_id, u16 core = 0; /* FIXME */ u32 val, offset = core << 4; dma_addr_t dma_addr; - void *addr; int ret; - addr = kmemdup(p, size, GFP_ATOMIC); - if (!addr) - return -ENOMEM; - - dma_addr = dma_map_single(npu->dev, addr, size, DMA_TO_DEVICE); + dma_addr = dma_map_single(npu->dev, p, size, DMA_TO_DEVICE); ret = dma_mapping_error(npu->dev, dma_addr); if (ret) - goto out; + return ret; spin_lock_bh(&npu->cores[core].lock); @@ -155,8 +156,6 @@ static int airoha_npu_send_msg(struct airoha_npu *npu, int func_id, spin_unlock_bh(&npu->cores[core].lock); dma_unmap_single(npu->dev, dma_addr, size, DMA_TO_DEVICE); -out: - kfree(addr); return ret; } @@ -261,79 +260,137 @@ static irqreturn_t airoha_npu_wdt_handler(int irq, void *core_instance) static int airoha_npu_ppe_init(struct airoha_npu *npu) { - struct ppe_mbox_data ppe_data = { - .func_type = NPU_OP_SET, - .func_id = PPE_FUNC_SET_WAIT_HWNAT_INIT, - .init_info = { - .ppe_type = PPE_TYPE_L2B_IPV4_IPV6, - .wan_mode = QDMA_WAN_ETHER, - }, - }; + struct ppe_mbox_data *ppe_data; + int err; + + ppe_data = kzalloc(sizeof(*ppe_data), GFP_KERNEL); + if (!ppe_data) + return -ENOMEM; - return airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data, - sizeof(struct ppe_mbox_data)); + ppe_data->func_type = NPU_OP_SET; + ppe_data->func_id = PPE_FUNC_SET_WAIT_HWNAT_INIT; + ppe_data->init_info.ppe_type = PPE_TYPE_L2B_IPV4_IPV6; + ppe_data->init_info.wan_mode = QDMA_WAN_ETHER; + + err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data, + sizeof(*ppe_data)); + kfree(ppe_data); + + return err; } static int airoha_npu_ppe_deinit(struct airoha_npu *npu) { - struct ppe_mbox_data ppe_data = { - .func_type = NPU_OP_SET, - .func_id = PPE_FUNC_SET_WAIT_HWNAT_DEINIT, - }; + struct ppe_mbox_data *ppe_data; + int err; + + ppe_data = kzalloc(sizeof(*ppe_data), GFP_KERNEL); + if (!ppe_data) + return -ENOMEM; - return airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data, - sizeof(struct ppe_mbox_data)); + 
ppe_data->func_type = NPU_OP_SET; + ppe_data->func_id = PPE_FUNC_SET_WAIT_HWNAT_DEINIT; + + err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data, + sizeof(*ppe_data)); + kfree(ppe_data); + + return err; } static int airoha_npu_ppe_flush_sram_entries(struct airoha_npu *npu, dma_addr_t foe_addr, int sram_num_entries) { - struct ppe_mbox_data ppe_data = { - .func_type = NPU_OP_SET, - .func_id = PPE_FUNC_SET_WAIT_API, - .set_info = { - .func_id = PPE_SRAM_RESET_VAL, - .data = foe_addr, - .size = sram_num_entries, - }, - }; + struct ppe_mbox_data *ppe_data; + int err; + + ppe_data = kzalloc(sizeof(*ppe_data), GFP_KERNEL); + if (!ppe_data) + return -ENOMEM; + + ppe_data->func_type = NPU_OP_SET; + ppe_data->func_id = PPE_FUNC_SET_WAIT_API; + ppe_data->set_info.func_id = PPE_SRAM_RESET_VAL; + ppe_data->set_info.data = foe_addr; + ppe_data->set_info.size = sram_num_entries; + + err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data, + sizeof(*ppe_data)); + kfree(ppe_data); - return airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data, - sizeof(struct ppe_mbox_data)); + return err; } static int airoha_npu_foe_commit_entry(struct airoha_npu *npu, dma_addr_t foe_addr, u32 entry_size, u32 hash, bool ppe2) { - struct ppe_mbox_data ppe_data = { - .func_type = NPU_OP_SET, - .func_id = PPE_FUNC_SET_WAIT_API, - .set_info = { - .data = foe_addr, - .size = entry_size, - }, - }; + struct ppe_mbox_data *ppe_data; int err; - ppe_data.set_info.func_id = ppe2 ? PPE2_SRAM_SET_ENTRY - : PPE_SRAM_SET_ENTRY; + ppe_data = kzalloc(sizeof(*ppe_data), GFP_ATOMIC); + if (!ppe_data) + return -ENOMEM; + + ppe_data->func_type = NPU_OP_SET; + ppe_data->func_id = PPE_FUNC_SET_WAIT_API; + ppe_data->set_info.data = foe_addr; + ppe_data->set_info.size = entry_size; + ppe_data->set_info.func_id = ppe2 ? 
PPE2_SRAM_SET_ENTRY + : PPE_SRAM_SET_ENTRY; - err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data, - sizeof(struct ppe_mbox_data)); + err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data, + sizeof(*ppe_data)); if (err) - return err; + goto out; + + ppe_data->set_info.func_id = PPE_SRAM_SET_VAL; + ppe_data->set_info.data = hash; + ppe_data->set_info.size = sizeof(u32); - ppe_data.set_info.func_id = PPE_SRAM_SET_VAL; - ppe_data.set_info.data = hash; - ppe_data.set_info.size = sizeof(u32); + err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data, + sizeof(*ppe_data)); +out: + kfree(ppe_data); - return airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data, - sizeof(struct ppe_mbox_data)); + return err; } -struct airoha_npu *airoha_npu_get(struct device *dev) +static int airoha_npu_stats_setup(struct airoha_npu *npu, + dma_addr_t foe_stats_addr) +{ + int err, size = PPE_STATS_NUM_ENTRIES * sizeof(*npu->stats); + struct ppe_mbox_data *ppe_data; + + if (!size) /* flow stats are disabled */ + return 0; + + ppe_data = kzalloc(sizeof(*ppe_data), GFP_ATOMIC); + if (!ppe_data) + return -ENOMEM; + + ppe_data->func_type = NPU_OP_SET; + ppe_data->func_id = PPE_FUNC_SET_WAIT_FLOW_STATS_SETUP; + ppe_data->stats_info.foe_stats_addr = foe_stats_addr; + + err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data, + sizeof(*ppe_data)); + if (err) + goto out; + + npu->stats = devm_ioremap(npu->dev, + ppe_data->stats_info.npu_stats_addr, + size); + if (!npu->stats) + err = -ENOMEM; +out: + kfree(ppe_data); + + return err; +} + +struct airoha_npu *airoha_npu_get(struct device *dev, dma_addr_t *stats_addr) { struct platform_device *pdev; struct device_node *np; @@ -371,6 +428,17 @@ struct airoha_npu *airoha_npu_get(struct device *dev) goto error_module_put; } + if (stats_addr) { + int err; + + err = airoha_npu_stats_setup(npu, *stats_addr); + if (err) { + dev_err(dev, "failed to allocate npu stats buffer\n"); + npu = ERR_PTR(err); + goto error_module_put; + } + } + return npu; error_module_put: diff --git a/drivers/net/ethernet/airoha/airoha_npu.h b/drivers/net/ethernet/airoha/airoha_npu.h index a2b8ae4d9473..98ec3be74ce4 100644 --- a/drivers/net/ethernet/airoha/airoha_npu.h +++ b/drivers/net/ethernet/airoha/airoha_npu.h @@ -17,6 +17,8 @@ struct airoha_npu { struct work_struct wdt_work; } cores[NPU_NUM_CORES]; + struct airoha_foe_stats __iomem *stats; + struct { int (*ppe_init)(struct airoha_npu *npu); int (*ppe_deinit)(struct airoha_npu *npu); @@ -30,5 +32,5 @@ struct airoha_npu { } ops; }; -struct airoha_npu *airoha_npu_get(struct device *dev); +struct airoha_npu *airoha_npu_get(struct device *dev, dma_addr_t *stats_addr); void airoha_npu_put(struct airoha_npu *npu); diff --git a/drivers/net/ethernet/airoha/airoha_ppe.c b/drivers/net/ethernet/airoha/airoha_ppe.c index f10dab935cab..12d32c92717a 100644 --- a/drivers/net/ethernet/airoha/airoha_ppe.c +++ b/drivers/net/ethernet/airoha/airoha_ppe.c @@ -24,6 +24,13 @@ static const struct rhashtable_params airoha_flow_table_params = { .automatic_shrinking = true, }; +static const struct rhashtable_params airoha_l2_flow_table_params = { + .head_offset = offsetof(struct airoha_flow_table_entry, l2_node), + .key_offset = offsetof(struct airoha_flow_table_entry, data.bridge), + .key_len = 2 * ETH_ALEN, + .automatic_shrinking = true, +}; + static bool airoha_ppe2_is_enabled(struct airoha_eth *eth) { return airoha_fe_rr(eth, REG_PPE_GLO_CFG(1)) & PPE_GLO_CFG_EN_MASK; @@ -77,6 +84,7 @@ static void airoha_ppe_hw_init(struct airoha_ppe *ppe) airoha_fe_rmw(eth, REG_PPE_TB_CFG(i), 
PPE_TB_CFG_SEARCH_MISS_MASK | + PPE_TB_CFG_KEEPALIVE_MASK | PPE_TB_ENTRY_SIZE_MASK, FIELD_PREP(PPE_TB_CFG_SEARCH_MISS_MASK, 3) | FIELD_PREP(PPE_TB_ENTRY_SIZE_MASK, 0)); @@ -95,7 +103,7 @@ static void airoha_ppe_hw_init(struct airoha_ppe *ppe) if (airoha_ppe2_is_enabled(eth)) { sram_num_entries = - PPE_RAM_NUM_ENTRIES_SHIFT(PPE1_SRAM_NUM_ENTRIES); + PPE_RAM_NUM_ENTRIES_SHIFT(PPE1_SRAM_NUM_DATA_ENTRIES); airoha_fe_rmw(eth, REG_PPE_TB_CFG(0), PPE_SRAM_TB_NUM_ENTRY_MASK | PPE_DRAM_TB_NUM_ENTRY_MASK, @@ -112,7 +120,7 @@ static void airoha_ppe_hw_init(struct airoha_ppe *ppe) dram_num_entries)); } else { sram_num_entries = - PPE_RAM_NUM_ENTRIES_SHIFT(PPE_SRAM_NUM_ENTRIES); + PPE_RAM_NUM_ENTRIES_SHIFT(PPE_SRAM_NUM_DATA_ENTRIES); airoha_fe_rmw(eth, REG_PPE_TB_CFG(0), PPE_SRAM_TB_NUM_ENTRY_MASK | PPE_DRAM_TB_NUM_ENTRY_MASK, @@ -197,6 +205,15 @@ static int airoha_get_dsa_port(struct net_device **dev) #endif } +static void airoha_ppe_foe_set_bridge_addrs(struct airoha_foe_bridge *br, + struct ethhdr *eh) +{ + br->dest_mac_hi = get_unaligned_be32(eh->h_dest); + br->dest_mac_lo = get_unaligned_be16(eh->h_dest + 4); + br->src_mac_hi = get_unaligned_be16(eh->h_source); + br->src_mac_lo = get_unaligned_be32(eh->h_source + 2); +} + static int airoha_ppe_foe_entry_prepare(struct airoha_eth *eth, struct airoha_foe_entry *hwe, struct net_device *dev, int type, @@ -234,6 +251,12 @@ static int airoha_ppe_foe_entry_prepare(struct airoha_eth *eth, else pse_port = 2; /* uplink relies on GDM2 loopback */ val |= FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT, pse_port); + + /* For downlink traffic consume SRAM memory for hw forwarding + * descriptors queue. + */ + if (airhoa_is_lan_gdm_port(port)) + val |= AIROHA_FOE_IB2_FAST_PATH; } if (is_multicast_ether_addr(data->eth.h_dest)) @@ -247,13 +270,7 @@ static int airoha_ppe_foe_entry_prepare(struct airoha_eth *eth, qdata = FIELD_PREP(AIROHA_FOE_SHAPER_ID, 0x7f); if (type == PPE_PKT_TYPE_BRIDGE) { - hwe->bridge.dest_mac_hi = get_unaligned_be32(data->eth.h_dest); - hwe->bridge.dest_mac_lo = - get_unaligned_be16(data->eth.h_dest + 4); - hwe->bridge.src_mac_hi = - get_unaligned_be16(data->eth.h_source); - hwe->bridge.src_mac_lo = - get_unaligned_be32(data->eth.h_source + 2); + airoha_ppe_foe_set_bridge_addrs(&hwe->bridge, &data->eth); hwe->bridge.data = qdata; hwe->bridge.ib2 = val; l2 = &hwe->bridge.l2.common; @@ -378,6 +395,19 @@ static u32 airoha_ppe_foe_get_entry_hash(struct airoha_foe_entry *hwe) hv3 = hwe->ipv6.src_ip[1] ^ hwe->ipv6.dest_ip[1]; hv3 ^= hwe->ipv6.src_ip[0]; break; + case PPE_PKT_TYPE_BRIDGE: { + struct airoha_foe_mac_info *l2 = &hwe->bridge.l2; + + hv1 = l2->common.src_mac_hi & 0xffff; + hv1 = hv1 << 16 | l2->src_mac_lo; + + hv2 = l2->common.dest_mac_lo; + hv2 = hv2 << 16; + hv2 = hv2 | ((l2->common.src_mac_hi & 0xffff0000) >> 16); + + hv3 = l2->common.dest_mac_hi; + break; + } case PPE_PKT_TYPE_IPV4_DSLITE: case PPE_PKT_TYPE_IPV6_6RD: default: @@ -394,6 +424,77 @@ static u32 airoha_ppe_foe_get_entry_hash(struct airoha_foe_entry *hwe) return hash; } +static u32 airoha_ppe_foe_get_flow_stats_index(struct airoha_ppe *ppe, u32 hash) +{ + if (!airoha_ppe2_is_enabled(ppe->eth)) + return hash; + + return hash >= PPE_STATS_NUM_ENTRIES ? 
hash - PPE1_STATS_NUM_ENTRIES + : hash; +} + +static void airoha_ppe_foe_flow_stat_entry_reset(struct airoha_ppe *ppe, + struct airoha_npu *npu, + int index) +{ + memset_io(&npu->stats[index], 0, sizeof(*npu->stats)); + memset(&ppe->foe_stats[index], 0, sizeof(*ppe->foe_stats)); +} + +static void airoha_ppe_foe_flow_stats_reset(struct airoha_ppe *ppe, + struct airoha_npu *npu) +{ + int i; + + for (i = 0; i < PPE_STATS_NUM_ENTRIES; i++) + airoha_ppe_foe_flow_stat_entry_reset(ppe, npu, i); +} + +static void airoha_ppe_foe_flow_stats_update(struct airoha_ppe *ppe, + struct airoha_npu *npu, + struct airoha_foe_entry *hwe, + u32 hash) +{ + int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1); + u32 index, pse_port, val, *data, *ib2, *meter; + u8 nbq; + + index = airoha_ppe_foe_get_flow_stats_index(ppe, hash); + if (index >= PPE_STATS_NUM_ENTRIES) + return; + + if (type == PPE_PKT_TYPE_BRIDGE) { + data = &hwe->bridge.data; + ib2 = &hwe->bridge.ib2; + meter = &hwe->bridge.l2.meter; + } else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) { + data = &hwe->ipv6.data; + ib2 = &hwe->ipv6.ib2; + meter = &hwe->ipv6.meter; + } else { + data = &hwe->ipv4.data; + ib2 = &hwe->ipv4.ib2; + meter = &hwe->ipv4.l2.meter; + } + + airoha_ppe_foe_flow_stat_entry_reset(ppe, npu, index); + + val = FIELD_GET(AIROHA_FOE_CHANNEL | AIROHA_FOE_QID, *data); + *data = (*data & ~AIROHA_FOE_ACTDP) | + FIELD_PREP(AIROHA_FOE_ACTDP, val); + + val = *ib2 & (AIROHA_FOE_IB2_NBQ | AIROHA_FOE_IB2_PSE_PORT | + AIROHA_FOE_IB2_PSE_QOS | AIROHA_FOE_IB2_FAST_PATH); + *meter |= FIELD_PREP(AIROHA_FOE_TUNNEL_MTU, val); + + pse_port = FIELD_GET(AIROHA_FOE_IB2_PSE_PORT, *ib2); + nbq = pse_port == 1 ? 6 : 5; + *ib2 &= ~(AIROHA_FOE_IB2_NBQ | AIROHA_FOE_IB2_PSE_PORT | + AIROHA_FOE_IB2_PSE_QOS); + *ib2 |= FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT, 6) | + FIELD_PREP(AIROHA_FOE_IB2_NBQ, nbq); +} + struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe, u32 hash) { @@ -447,6 +548,8 @@ static int airoha_ppe_foe_commit_entry(struct airoha_ppe *ppe, struct airoha_foe_entry *hwe = ppe->foe + hash * sizeof(*hwe); u32 ts = airoha_ppe_get_timestamp(ppe); struct airoha_eth *eth = ppe->eth; + struct airoha_npu *npu; + int err = 0; memcpy(&hwe->d, &e->d, sizeof(*hwe) - sizeof(hwe->ib1)); wmb(); @@ -455,31 +558,127 @@ static int airoha_ppe_foe_commit_entry(struct airoha_ppe *ppe, e->ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_TIMESTAMP, ts); hwe->ib1 = e->ib1; + rcu_read_lock(); + + npu = rcu_dereference(eth->npu); + if (!npu) { + err = -ENODEV; + goto unlock; + } + + airoha_ppe_foe_flow_stats_update(ppe, npu, hwe, hash); + if (hash < PPE_SRAM_NUM_ENTRIES) { dma_addr_t addr = ppe->foe_dma + hash * sizeof(*hwe); bool ppe2 = airoha_ppe2_is_enabled(eth) && hash >= PPE1_SRAM_NUM_ENTRIES; - struct airoha_npu *npu; - int err = -ENODEV; - rcu_read_lock(); - npu = rcu_dereference(eth->npu); - if (npu) - err = npu->ops.ppe_foe_commit_entry(npu, addr, - sizeof(*hwe), hash, - ppe2); - rcu_read_unlock(); + err = npu->ops.ppe_foe_commit_entry(npu, addr, sizeof(*hwe), + hash, ppe2); + } +unlock: + rcu_read_unlock(); + + return err; +} - return err; +static void airoha_ppe_foe_remove_flow(struct airoha_ppe *ppe, + struct airoha_flow_table_entry *e) +{ + lockdep_assert_held(&ppe_lock); + + hlist_del_init(&e->list); + if (e->hash != 0xffff) { + e->data.ib1 &= ~AIROHA_FOE_IB1_BIND_STATE; + e->data.ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_STATE, + AIROHA_FOE_STATE_INVALID); + airoha_ppe_foe_commit_entry(ppe, &e->data, e->hash); + e->hash = 0xffff; + } + if (e->type == 
FLOW_TYPE_L2_SUBFLOW) { + hlist_del_init(&e->l2_subflow_node); + kfree(e); } +} + +static void airoha_ppe_foe_remove_l2_flow(struct airoha_ppe *ppe, + struct airoha_flow_table_entry *e) +{ + struct hlist_head *head = &e->l2_flows; + struct hlist_node *n; + + lockdep_assert_held(&ppe_lock); + + rhashtable_remove_fast(&ppe->l2_flows, &e->l2_node, + airoha_l2_flow_table_params); + hlist_for_each_entry_safe(e, n, head, l2_subflow_node) + airoha_ppe_foe_remove_flow(ppe, e); +} + +static void airoha_ppe_foe_flow_remove_entry(struct airoha_ppe *ppe, + struct airoha_flow_table_entry *e) +{ + spin_lock_bh(&ppe_lock); + + if (e->type == FLOW_TYPE_L2) + airoha_ppe_foe_remove_l2_flow(ppe, e); + else + airoha_ppe_foe_remove_flow(ppe, e); + + spin_unlock_bh(&ppe_lock); +} + +static int +airoha_ppe_foe_commit_subflow_entry(struct airoha_ppe *ppe, + struct airoha_flow_table_entry *e, + u32 hash) +{ + u32 mask = AIROHA_FOE_IB1_BIND_PACKET_TYPE | AIROHA_FOE_IB1_BIND_UDP; + struct airoha_foe_entry *hwe_p, hwe; + struct airoha_flow_table_entry *f; + struct airoha_foe_mac_info *l2; + int type; + + hwe_p = airoha_ppe_foe_get_entry(ppe, hash); + if (!hwe_p) + return -EINVAL; + + f = kzalloc(sizeof(*f), GFP_ATOMIC); + if (!f) + return -ENOMEM; + + hlist_add_head(&f->l2_subflow_node, &e->l2_flows); + f->type = FLOW_TYPE_L2_SUBFLOW; + f->hash = hash; + + memcpy(&hwe, hwe_p, sizeof(*hwe_p)); + hwe.ib1 = (hwe.ib1 & mask) | (e->data.ib1 & ~mask); + l2 = &hwe.bridge.l2; + memcpy(l2, &e->data.bridge.l2, sizeof(*l2)); + + type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe.ib1); + if (type == PPE_PKT_TYPE_IPV4_HNAPT) + memcpy(&hwe.ipv4.new_tuple, &hwe.ipv4.orig_tuple, + sizeof(hwe.ipv4.new_tuple)); + else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T && + l2->common.etype == ETH_P_IP) + l2->common.etype = ETH_P_IPV6; + + hwe.bridge.ib2 = e->data.bridge.ib2; + hwe.bridge.data = e->data.bridge.data; + airoha_ppe_foe_commit_entry(ppe, &hwe, hash); return 0; } -static void airoha_ppe_foe_insert_entry(struct airoha_ppe *ppe, u32 hash) +static void airoha_ppe_foe_insert_entry(struct airoha_ppe *ppe, + struct sk_buff *skb, + u32 hash) { struct airoha_flow_table_entry *e; + struct airoha_foe_bridge br = {}; struct airoha_foe_entry *hwe; + bool commit_done = false; struct hlist_node *n; u32 index, state; @@ -495,21 +694,68 @@ static void airoha_ppe_foe_insert_entry(struct airoha_ppe *ppe, u32 hash) index = airoha_ppe_foe_get_entry_hash(hwe); hlist_for_each_entry_safe(e, n, &ppe->foe_flow[index], list) { - if (airoha_ppe_foe_compare_entry(e, hwe)) { - airoha_ppe_foe_commit_entry(ppe, &e->data, hash); - e->hash = hash; - break; + if (e->type == FLOW_TYPE_L2_SUBFLOW) { + state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, hwe->ib1); + if (state != AIROHA_FOE_STATE_BIND) { + e->hash = 0xffff; + airoha_ppe_foe_remove_flow(ppe, e); + } + continue; + } + + if (commit_done || !airoha_ppe_foe_compare_entry(e, hwe)) { + e->hash = 0xffff; + continue; } + + airoha_ppe_foe_commit_entry(ppe, &e->data, hash); + commit_done = true; + e->hash = hash; } + + if (commit_done) + goto unlock; + + airoha_ppe_foe_set_bridge_addrs(&br, eth_hdr(skb)); + e = rhashtable_lookup_fast(&ppe->l2_flows, &br, + airoha_l2_flow_table_params); + if (e) + airoha_ppe_foe_commit_subflow_entry(ppe, e, hash); unlock: spin_unlock_bh(&ppe_lock); } +static int +airoha_ppe_foe_l2_flow_commit_entry(struct airoha_ppe *ppe, + struct airoha_flow_table_entry *e) +{ + struct airoha_flow_table_entry *prev; + + e->type = FLOW_TYPE_L2; + prev = rhashtable_lookup_get_insert_fast(&ppe->l2_flows, 
&e->l2_node, + airoha_l2_flow_table_params); + if (!prev) + return 0; + + if (IS_ERR(prev)) + return PTR_ERR(prev); + + return rhashtable_replace_fast(&ppe->l2_flows, &prev->l2_node, + &e->l2_node, + airoha_l2_flow_table_params); +} + static int airoha_ppe_foe_flow_commit_entry(struct airoha_ppe *ppe, struct airoha_flow_table_entry *e) { - u32 hash = airoha_ppe_foe_get_entry_hash(&e->data); + int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, e->data.ib1); + u32 hash; + + if (type == PPE_PKT_TYPE_BRIDGE) + return airoha_ppe_foe_l2_flow_commit_entry(ppe, e); + hash = airoha_ppe_foe_get_entry_hash(&e->data); + e->type = FLOW_TYPE_L4; e->hash = 0xffff; spin_lock_bh(&ppe_lock); @@ -519,23 +765,98 @@ static int airoha_ppe_foe_flow_commit_entry(struct airoha_ppe *ppe, return 0; } -static void airoha_ppe_foe_flow_remove_entry(struct airoha_ppe *ppe, +static int airoha_ppe_get_entry_idle_time(struct airoha_ppe *ppe, u32 ib1) +{ + u32 state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, ib1); + u32 ts, ts_mask, now = airoha_ppe_get_timestamp(ppe); + int idle; + + if (state == AIROHA_FOE_STATE_BIND) { + ts = FIELD_GET(AIROHA_FOE_IB1_BIND_TIMESTAMP, ib1); + ts_mask = AIROHA_FOE_IB1_BIND_TIMESTAMP; + } else { + ts = FIELD_GET(AIROHA_FOE_IB1_UNBIND_TIMESTAMP, ib1); + now = FIELD_GET(AIROHA_FOE_IB1_UNBIND_TIMESTAMP, now); + ts_mask = AIROHA_FOE_IB1_UNBIND_TIMESTAMP; + } + idle = now - ts; + + return idle < 0 ? idle + ts_mask + 1 : idle; +} + +static void +airoha_ppe_foe_flow_l2_entry_update(struct airoha_ppe *ppe, + struct airoha_flow_table_entry *e) +{ + int min_idle = airoha_ppe_get_entry_idle_time(ppe, e->data.ib1); + struct airoha_flow_table_entry *iter; + struct hlist_node *n; + + lockdep_assert_held(&ppe_lock); + + hlist_for_each_entry_safe(iter, n, &e->l2_flows, l2_subflow_node) { + struct airoha_foe_entry *hwe; + u32 ib1, state; + int idle; + + hwe = airoha_ppe_foe_get_entry(ppe, iter->hash); + ib1 = READ_ONCE(hwe->ib1); + + state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, ib1); + if (state != AIROHA_FOE_STATE_BIND) { + iter->hash = 0xffff; + airoha_ppe_foe_remove_flow(ppe, iter); + continue; + } + + idle = airoha_ppe_get_entry_idle_time(ppe, ib1); + if (idle >= min_idle) + continue; + + min_idle = idle; + e->data.ib1 &= ~AIROHA_FOE_IB1_BIND_TIMESTAMP; + e->data.ib1 |= ib1 & AIROHA_FOE_IB1_BIND_TIMESTAMP; + } +} + +static void airoha_ppe_foe_flow_entry_update(struct airoha_ppe *ppe, struct airoha_flow_table_entry *e) { + struct airoha_foe_entry *hwe_p, hwe = {}; + spin_lock_bh(&ppe_lock); - hlist_del_init(&e->list); - if (e->hash != 0xffff) { - e->data.ib1 &= ~AIROHA_FOE_IB1_BIND_STATE; - e->data.ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_STATE, - AIROHA_FOE_STATE_INVALID); - airoha_ppe_foe_commit_entry(ppe, &e->data, e->hash); + if (e->type == FLOW_TYPE_L2) { + airoha_ppe_foe_flow_l2_entry_update(ppe, e); + goto unlock; + } + + if (e->hash == 0xffff) + goto unlock; + + hwe_p = airoha_ppe_foe_get_entry(ppe, e->hash); + if (!hwe_p) + goto unlock; + + memcpy(&hwe, hwe_p, sizeof(*hwe_p)); + if (!airoha_ppe_foe_compare_entry(e, &hwe)) { e->hash = 0xffff; + goto unlock; } + e->data.ib1 = hwe.ib1; +unlock: spin_unlock_bh(&ppe_lock); } +static int airoha_ppe_entry_idle_time(struct airoha_ppe *ppe, + struct airoha_flow_table_entry *e) +{ + airoha_ppe_foe_flow_entry_update(ppe, e); + + return airoha_ppe_get_entry_idle_time(ppe, e->data.ib1); +} + static int airoha_ppe_flow_offload_replace(struct airoha_gdm_port *port, struct flow_cls_offload *f) { @@ -751,6 +1072,60 @@ static int airoha_ppe_flow_offload_destroy(struct 
airoha_gdm_port *port, return 0; } +void airoha_ppe_foe_entry_get_stats(struct airoha_ppe *ppe, u32 hash, + struct airoha_foe_stats64 *stats) +{ + u32 index = airoha_ppe_foe_get_flow_stats_index(ppe, hash); + struct airoha_eth *eth = ppe->eth; + struct airoha_npu *npu; + + if (index >= PPE_STATS_NUM_ENTRIES) + return; + + rcu_read_lock(); + + npu = rcu_dereference(eth->npu); + if (npu) { + u64 packets = ppe->foe_stats[index].packets; + u64 bytes = ppe->foe_stats[index].bytes; + struct airoha_foe_stats npu_stats; + + memcpy_fromio(&npu_stats, &npu->stats[index], + sizeof(*npu->stats)); + stats->packets = packets << 32 | npu_stats.packets; + stats->bytes = bytes << 32 | npu_stats.bytes; + } + + rcu_read_unlock(); +} + +static int airoha_ppe_flow_offload_stats(struct airoha_gdm_port *port, + struct flow_cls_offload *f) +{ + struct airoha_eth *eth = port->qdma->eth; + struct airoha_flow_table_entry *e; + u32 idle; + + e = rhashtable_lookup(ð->flow_table, &f->cookie, + airoha_flow_table_params); + if (!e) + return -ENOENT; + + idle = airoha_ppe_entry_idle_time(eth->ppe, e); + f->stats.lastused = jiffies - idle * HZ; + + if (e->hash != 0xffff) { + struct airoha_foe_stats64 stats = {}; + + airoha_ppe_foe_entry_get_stats(eth->ppe, e->hash, &stats); + f->stats.pkts += (stats.packets - e->stats.packets); + f->stats.bytes += (stats.bytes - e->stats.bytes); + e->stats = stats; + } + + return 0; +} + static int airoha_ppe_flow_offload_cmd(struct airoha_gdm_port *port, struct flow_cls_offload *f) { @@ -759,6 +1134,8 @@ static int airoha_ppe_flow_offload_cmd(struct airoha_gdm_port *port, return airoha_ppe_flow_offload_replace(port, f); case FLOW_CLS_DESTROY: return airoha_ppe_flow_offload_destroy(port, f); + case FLOW_CLS_STATS: + return airoha_ppe_flow_offload_stats(port, f); default: break; } @@ -784,11 +1161,12 @@ static int airoha_ppe_flush_sram_entries(struct airoha_ppe *ppe, static struct airoha_npu *airoha_ppe_npu_get(struct airoha_eth *eth) { - struct airoha_npu *npu = airoha_npu_get(eth->dev); + struct airoha_npu *npu = airoha_npu_get(eth->dev, + ð->ppe->foe_stats_dma); if (IS_ERR(npu)) { request_module("airoha-npu"); - npu = airoha_npu_get(eth->dev); + npu = airoha_npu_get(eth->dev, ð->ppe->foe_stats_dma); } return npu; @@ -811,6 +1189,8 @@ static int airoha_ppe_offload_setup(struct airoha_eth *eth) if (err) goto error_npu_put; + airoha_ppe_foe_flow_stats_reset(eth->ppe, npu); + rcu_assign_pointer(eth->npu, npu); synchronize_rcu(); @@ -822,18 +1202,13 @@ error_npu_put: return err; } -int airoha_ppe_setup_tc_block_cb(enum tc_setup_type type, void *type_data, - void *cb_priv) +int airoha_ppe_setup_tc_block_cb(struct net_device *dev, void *type_data) { - struct flow_cls_offload *cls = type_data; - struct net_device *dev = cb_priv; struct airoha_gdm_port *port = netdev_priv(dev); + struct flow_cls_offload *cls = type_data; struct airoha_eth *eth = port->qdma->eth; int err = 0; - if (!tc_can_offload(dev) || type != TC_SETUP_CLSFLOWER) - return -EOPNOTSUPP; - mutex_lock(&flow_offload_mutex); if (!eth->npu) @@ -846,7 +1221,8 @@ int airoha_ppe_setup_tc_block_cb(enum tc_setup_type type, void *type_data, return err; } -void airoha_ppe_check_skb(struct airoha_ppe *ppe, u16 hash) +void airoha_ppe_check_skb(struct airoha_ppe *ppe, struct sk_buff *skb, + u16 hash) { u16 now, diff; @@ -859,7 +1235,7 @@ void airoha_ppe_check_skb(struct airoha_ppe *ppe, u16 hash) return; ppe->foe_check_time[hash] = now; - airoha_ppe_foe_insert_entry(ppe, hash); + airoha_ppe_foe_insert_entry(ppe, skb, hash); } int 
airoha_ppe_init(struct airoha_eth *eth) @@ -886,13 +1262,33 @@ int airoha_ppe_init(struct airoha_eth *eth) if (!ppe->foe_flow) return -ENOMEM; + foe_size = PPE_STATS_NUM_ENTRIES * sizeof(*ppe->foe_stats); + if (foe_size) { + ppe->foe_stats = dmam_alloc_coherent(eth->dev, foe_size, + &ppe->foe_stats_dma, + GFP_KERNEL); + if (!ppe->foe_stats) + return -ENOMEM; + } + err = rhashtable_init(ð->flow_table, &airoha_flow_table_params); if (err) return err; + err = rhashtable_init(&ppe->l2_flows, &airoha_l2_flow_table_params); + if (err) + goto error_flow_table_destroy; + err = airoha_ppe_debugfs_init(ppe); if (err) - rhashtable_destroy(ð->flow_table); + goto error_l2_flow_table_destroy; + + return 0; + +error_l2_flow_table_destroy: + rhashtable_destroy(&ppe->l2_flows); +error_flow_table_destroy: + rhashtable_destroy(ð->flow_table); return err; } @@ -909,6 +1305,7 @@ void airoha_ppe_deinit(struct airoha_eth *eth) } rcu_read_unlock(); + rhashtable_destroy(ð->ppe->l2_flows); rhashtable_destroy(ð->flow_table); debugfs_remove(eth->ppe->debugfs_dir); } diff --git a/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c b/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c index 3cdc6fd53fc7..05a756233f6a 100644 --- a/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c +++ b/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c @@ -61,6 +61,7 @@ static int airoha_ppe_debugfs_foe_show(struct seq_file *m, void *private, u16 *src_port = NULL, *dest_port = NULL; struct airoha_foe_mac_info_common *l2; unsigned char h_source[ETH_ALEN] = {}; + struct airoha_foe_stats64 stats = {}; unsigned char h_dest[ETH_ALEN]; struct airoha_foe_entry *hwe; u32 type, state, ib2, data; @@ -144,14 +145,18 @@ static int airoha_ppe_debugfs_foe_show(struct seq_file *m, void *private, cpu_to_be16(hwe->ipv4.l2.src_mac_lo); } + airoha_ppe_foe_entry_get_stats(ppe, i, &stats); + *((__be32 *)h_dest) = cpu_to_be32(l2->dest_mac_hi); *((__be16 *)&h_dest[4]) = cpu_to_be16(l2->dest_mac_lo); *((__be32 *)h_source) = cpu_to_be32(l2->src_mac_hi); seq_printf(m, " eth=%pM->%pM etype=%04x data=%08x" - " vlan=%d,%d ib1=%08x ib2=%08x\n", + " vlan=%d,%d ib1=%08x ib2=%08x" + " packets=%llu bytes=%llu\n", h_source, h_dest, l2->etype, data, - l2->vlan1, l2->vlan2, hwe->ib1, ib2); + l2->vlan1, l2->vlan2, hwe->ib1, ib2, + stats.packets, stats.bytes); } return 0; diff --git a/drivers/net/ethernet/airoha/airoha_regs.h b/drivers/net/ethernet/airoha/airoha_regs.h index 8146cde4e8ba..d931530fc96f 100644 --- a/drivers/net/ethernet/airoha/airoha_regs.h +++ b/drivers/net/ethernet/airoha/airoha_regs.h @@ -283,6 +283,7 @@ #define PPE_HASH_SEED 0x12345678 #define REG_PPE_DFT_CPORT0(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x248) +#define DFT_CPORT_MASK(_n) GENMASK(3 + ((_n) << 2), ((_n) << 2)) #define REG_PPE_DFT_CPORT1(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x24c) @@ -422,11 +423,12 @@ ((_n) == 2) ? 0x0720 : \ ((_n) == 1) ? 0x0024 : 0x0020) -#define REG_INT_ENABLE(_n) \ - (((_n) == 4) ? 0x0750 : \ - ((_n) == 3) ? 0x0744 : \ - ((_n) == 2) ? 0x0740 : \ - ((_n) == 1) ? 0x002c : 0x0028) +#define REG_INT_ENABLE(_b, _n) \ + (((_n) == 4) ? 0x0750 + ((_b) << 5) : \ + ((_n) == 3) ? 0x0744 + ((_b) << 5) : \ + ((_n) == 2) ? 0x0740 + ((_b) << 5) : \ + ((_n) == 1) ? 
0x002c + ((_b) << 3) : \ + 0x0028 + ((_b) << 3)) /* QDMA_CSR_INT_ENABLE1 */ #define RX15_COHERENT_INT_MASK BIT(31) @@ -461,6 +463,26 @@ #define IRQ0_FULL_INT_MASK BIT(1) #define IRQ0_INT_MASK BIT(0) +#define RX_COHERENT_LOW_INT_MASK \ + (RX15_COHERENT_INT_MASK | RX14_COHERENT_INT_MASK | \ + RX13_COHERENT_INT_MASK | RX12_COHERENT_INT_MASK | \ + RX11_COHERENT_INT_MASK | RX10_COHERENT_INT_MASK | \ + RX9_COHERENT_INT_MASK | RX8_COHERENT_INT_MASK | \ + RX7_COHERENT_INT_MASK | RX6_COHERENT_INT_MASK | \ + RX5_COHERENT_INT_MASK | RX4_COHERENT_INT_MASK | \ + RX3_COHERENT_INT_MASK | RX2_COHERENT_INT_MASK | \ + RX1_COHERENT_INT_MASK | RX0_COHERENT_INT_MASK) + +#define RX_COHERENT_LOW_OFFSET __ffs(RX_COHERENT_LOW_INT_MASK) +#define INT_RX0_MASK(_n) \ + (((_n) << RX_COHERENT_LOW_OFFSET) & RX_COHERENT_LOW_INT_MASK) + +#define TX_COHERENT_LOW_INT_MASK \ + (TX7_COHERENT_INT_MASK | TX6_COHERENT_INT_MASK | \ + TX5_COHERENT_INT_MASK | TX4_COHERENT_INT_MASK | \ + TX3_COHERENT_INT_MASK | TX2_COHERENT_INT_MASK | \ + TX1_COHERENT_INT_MASK | TX0_COHERENT_INT_MASK) + #define TX_DONE_INT_MASK(_n) \ ((_n) ? IRQ1_INT_MASK | IRQ1_FULL_INT_MASK \ : IRQ0_INT_MASK | IRQ0_FULL_INT_MASK) @@ -469,17 +491,6 @@ (IRQ1_INT_MASK | IRQ1_FULL_INT_MASK | \ IRQ0_INT_MASK | IRQ0_FULL_INT_MASK) -#define INT_IDX0_MASK \ - (TX0_COHERENT_INT_MASK | TX1_COHERENT_INT_MASK | \ - TX2_COHERENT_INT_MASK | TX3_COHERENT_INT_MASK | \ - TX4_COHERENT_INT_MASK | TX5_COHERENT_INT_MASK | \ - TX6_COHERENT_INT_MASK | TX7_COHERENT_INT_MASK | \ - RX0_COHERENT_INT_MASK | RX1_COHERENT_INT_MASK | \ - RX2_COHERENT_INT_MASK | RX3_COHERENT_INT_MASK | \ - RX4_COHERENT_INT_MASK | RX7_COHERENT_INT_MASK | \ - RX8_COHERENT_INT_MASK | RX9_COHERENT_INT_MASK | \ - RX15_COHERENT_INT_MASK | INT_TX_MASK) - /* QDMA_CSR_INT_ENABLE2 */ #define RX15_NO_CPU_DSCP_INT_MASK BIT(31) #define RX14_NO_CPU_DSCP_INT_MASK BIT(30) @@ -514,19 +525,121 @@ #define RX1_DONE_INT_MASK BIT(1) #define RX0_DONE_INT_MASK BIT(0) -#define RX_DONE_INT_MASK \ - (RX0_DONE_INT_MASK | RX1_DONE_INT_MASK | \ - RX2_DONE_INT_MASK | RX3_DONE_INT_MASK | \ - RX4_DONE_INT_MASK | RX7_DONE_INT_MASK | \ - RX8_DONE_INT_MASK | RX9_DONE_INT_MASK | \ - RX15_DONE_INT_MASK) -#define INT_IDX1_MASK \ - (RX_DONE_INT_MASK | \ - RX0_NO_CPU_DSCP_INT_MASK | RX1_NO_CPU_DSCP_INT_MASK | \ - RX2_NO_CPU_DSCP_INT_MASK | RX3_NO_CPU_DSCP_INT_MASK | \ - RX4_NO_CPU_DSCP_INT_MASK | RX7_NO_CPU_DSCP_INT_MASK | \ - RX8_NO_CPU_DSCP_INT_MASK | RX9_NO_CPU_DSCP_INT_MASK | \ - RX15_NO_CPU_DSCP_INT_MASK) +#define RX_NO_CPU_DSCP_LOW_INT_MASK \ + (RX15_NO_CPU_DSCP_INT_MASK | RX14_NO_CPU_DSCP_INT_MASK | \ + RX13_NO_CPU_DSCP_INT_MASK | RX12_NO_CPU_DSCP_INT_MASK | \ + RX11_NO_CPU_DSCP_INT_MASK | RX10_NO_CPU_DSCP_INT_MASK | \ + RX9_NO_CPU_DSCP_INT_MASK | RX8_NO_CPU_DSCP_INT_MASK | \ + RX7_NO_CPU_DSCP_INT_MASK | RX6_NO_CPU_DSCP_INT_MASK | \ + RX5_NO_CPU_DSCP_INT_MASK | RX4_NO_CPU_DSCP_INT_MASK | \ + RX3_NO_CPU_DSCP_INT_MASK | RX2_NO_CPU_DSCP_INT_MASK | \ + RX1_NO_CPU_DSCP_INT_MASK | RX0_NO_CPU_DSCP_INT_MASK) + +#define RX_DONE_LOW_INT_MASK \ + (RX15_DONE_INT_MASK | RX14_DONE_INT_MASK | \ + RX13_DONE_INT_MASK | RX12_DONE_INT_MASK | \ + RX11_DONE_INT_MASK | RX10_DONE_INT_MASK | \ + RX9_DONE_INT_MASK | RX8_DONE_INT_MASK | \ + RX7_DONE_INT_MASK | RX6_DONE_INT_MASK | \ + RX5_DONE_INT_MASK | RX4_DONE_INT_MASK | \ + RX3_DONE_INT_MASK | RX2_DONE_INT_MASK | \ + RX1_DONE_INT_MASK | RX0_DONE_INT_MASK) + +#define RX_NO_CPU_DSCP_LOW_OFFSET __ffs(RX_NO_CPU_DSCP_LOW_INT_MASK) +#define INT_RX1_MASK(_n) \ + ((((_n) << RX_NO_CPU_DSCP_LOW_OFFSET) & 
RX_NO_CPU_DSCP_LOW_INT_MASK) | \ + (RX_DONE_LOW_INT_MASK & (_n))) + +/* QDMA_CSR_INT_ENABLE3 */ +#define RX31_NO_CPU_DSCP_INT_MASK BIT(31) +#define RX30_NO_CPU_DSCP_INT_MASK BIT(30) +#define RX29_NO_CPU_DSCP_INT_MASK BIT(29) +#define RX28_NO_CPU_DSCP_INT_MASK BIT(28) +#define RX27_NO_CPU_DSCP_INT_MASK BIT(27) +#define RX26_NO_CPU_DSCP_INT_MASK BIT(26) +#define RX25_NO_CPU_DSCP_INT_MASK BIT(25) +#define RX24_NO_CPU_DSCP_INT_MASK BIT(24) +#define RX23_NO_CPU_DSCP_INT_MASK BIT(23) +#define RX22_NO_CPU_DSCP_INT_MASK BIT(22) +#define RX21_NO_CPU_DSCP_INT_MASK BIT(21) +#define RX20_NO_CPU_DSCP_INT_MASK BIT(20) +#define RX19_NO_CPU_DSCP_INT_MASK BIT(19) +#define RX18_NO_CPU_DSCP_INT_MASK BIT(18) +#define RX17_NO_CPU_DSCP_INT_MASK BIT(17) +#define RX16_NO_CPU_DSCP_INT_MASK BIT(16) +#define RX31_DONE_INT_MASK BIT(15) +#define RX30_DONE_INT_MASK BIT(14) +#define RX29_DONE_INT_MASK BIT(13) +#define RX28_DONE_INT_MASK BIT(12) +#define RX27_DONE_INT_MASK BIT(11) +#define RX26_DONE_INT_MASK BIT(10) +#define RX25_DONE_INT_MASK BIT(9) +#define RX24_DONE_INT_MASK BIT(8) +#define RX23_DONE_INT_MASK BIT(7) +#define RX22_DONE_INT_MASK BIT(6) +#define RX21_DONE_INT_MASK BIT(5) +#define RX20_DONE_INT_MASK BIT(4) +#define RX19_DONE_INT_MASK BIT(3) +#define RX18_DONE_INT_MASK BIT(2) +#define RX17_DONE_INT_MASK BIT(1) +#define RX16_DONE_INT_MASK BIT(0) + +#define RX_NO_CPU_DSCP_HIGH_INT_MASK \ + (RX31_NO_CPU_DSCP_INT_MASK | RX30_NO_CPU_DSCP_INT_MASK | \ + RX29_NO_CPU_DSCP_INT_MASK | RX28_NO_CPU_DSCP_INT_MASK | \ + RX27_NO_CPU_DSCP_INT_MASK | RX26_NO_CPU_DSCP_INT_MASK | \ + RX25_NO_CPU_DSCP_INT_MASK | RX24_NO_CPU_DSCP_INT_MASK | \ + RX23_NO_CPU_DSCP_INT_MASK | RX22_NO_CPU_DSCP_INT_MASK | \ + RX21_NO_CPU_DSCP_INT_MASK | RX20_NO_CPU_DSCP_INT_MASK | \ + RX19_NO_CPU_DSCP_INT_MASK | RX18_NO_CPU_DSCP_INT_MASK | \ + RX17_NO_CPU_DSCP_INT_MASK | RX16_NO_CPU_DSCP_INT_MASK) + +#define RX_DONE_HIGH_INT_MASK \ + (RX31_DONE_INT_MASK | RX30_DONE_INT_MASK | \ + RX29_DONE_INT_MASK | RX28_DONE_INT_MASK | \ + RX27_DONE_INT_MASK | RX26_DONE_INT_MASK | \ + RX25_DONE_INT_MASK | RX24_DONE_INT_MASK | \ + RX23_DONE_INT_MASK | RX22_DONE_INT_MASK | \ + RX21_DONE_INT_MASK | RX20_DONE_INT_MASK | \ + RX19_DONE_INT_MASK | RX18_DONE_INT_MASK | \ + RX17_DONE_INT_MASK | RX16_DONE_INT_MASK) + +#define RX_DONE_INT_MASK (RX_DONE_HIGH_INT_MASK | RX_DONE_LOW_INT_MASK) +#define RX_DONE_HIGH_OFFSET fls(RX_DONE_HIGH_INT_MASK) + +#define INT_RX2_MASK(_n) \ + ((RX_NO_CPU_DSCP_HIGH_INT_MASK & (_n)) | \ + (((_n) >> RX_DONE_HIGH_OFFSET) & RX_DONE_HIGH_INT_MASK)) + +/* QDMA_CSR_INT_ENABLE4 */ +#define RX31_COHERENT_INT_MASK BIT(31) +#define RX30_COHERENT_INT_MASK BIT(30) +#define RX29_COHERENT_INT_MASK BIT(29) +#define RX28_COHERENT_INT_MASK BIT(28) +#define RX27_COHERENT_INT_MASK BIT(27) +#define RX26_COHERENT_INT_MASK BIT(26) +#define RX25_COHERENT_INT_MASK BIT(25) +#define RX24_COHERENT_INT_MASK BIT(24) +#define RX23_COHERENT_INT_MASK BIT(23) +#define RX22_COHERENT_INT_MASK BIT(22) +#define RX21_COHERENT_INT_MASK BIT(21) +#define RX20_COHERENT_INT_MASK BIT(20) +#define RX19_COHERENT_INT_MASK BIT(19) +#define RX18_COHERENT_INT_MASK BIT(18) +#define RX17_COHERENT_INT_MASK BIT(17) +#define RX16_COHERENT_INT_MASK BIT(16) + +#define RX_COHERENT_HIGH_INT_MASK \ + (RX31_COHERENT_INT_MASK | RX30_COHERENT_INT_MASK | \ + RX29_COHERENT_INT_MASK | RX28_COHERENT_INT_MASK | \ + RX27_COHERENT_INT_MASK | RX26_COHERENT_INT_MASK | \ + RX25_COHERENT_INT_MASK | RX24_COHERENT_INT_MASK | \ + RX23_COHERENT_INT_MASK | RX22_COHERENT_INT_MASK | \ + RX21_COHERENT_INT_MASK | 
RX20_COHERENT_INT_MASK | \ + RX19_COHERENT_INT_MASK | RX18_COHERENT_INT_MASK | \ + RX17_COHERENT_INT_MASK | RX16_COHERENT_INT_MASK) + +#define INT_RX3_MASK(_n) (RX_COHERENT_HIGH_INT_MASK & (_n)) /* QDMA_CSR_INT_ENABLE5 */ #define TX31_COHERENT_INT_MASK BIT(31) @@ -554,19 +667,19 @@ #define TX9_COHERENT_INT_MASK BIT(9) #define TX8_COHERENT_INT_MASK BIT(8) -#define INT_IDX4_MASK \ - (TX8_COHERENT_INT_MASK | TX9_COHERENT_INT_MASK | \ - TX10_COHERENT_INT_MASK | TX11_COHERENT_INT_MASK | \ - TX12_COHERENT_INT_MASK | TX13_COHERENT_INT_MASK | \ - TX14_COHERENT_INT_MASK | TX15_COHERENT_INT_MASK | \ - TX16_COHERENT_INT_MASK | TX17_COHERENT_INT_MASK | \ - TX18_COHERENT_INT_MASK | TX19_COHERENT_INT_MASK | \ - TX20_COHERENT_INT_MASK | TX21_COHERENT_INT_MASK | \ - TX22_COHERENT_INT_MASK | TX23_COHERENT_INT_MASK | \ - TX24_COHERENT_INT_MASK | TX25_COHERENT_INT_MASK | \ - TX26_COHERENT_INT_MASK | TX27_COHERENT_INT_MASK | \ - TX28_COHERENT_INT_MASK | TX29_COHERENT_INT_MASK | \ - TX30_COHERENT_INT_MASK | TX31_COHERENT_INT_MASK) +#define TX_COHERENT_HIGH_INT_MASK \ + (TX31_COHERENT_INT_MASK | TX30_COHERENT_INT_MASK | \ + TX29_COHERENT_INT_MASK | TX28_COHERENT_INT_MASK | \ + TX27_COHERENT_INT_MASK | TX26_COHERENT_INT_MASK | \ + TX25_COHERENT_INT_MASK | TX24_COHERENT_INT_MASK | \ + TX23_COHERENT_INT_MASK | TX22_COHERENT_INT_MASK | \ + TX21_COHERENT_INT_MASK | TX20_COHERENT_INT_MASK | \ + TX19_COHERENT_INT_MASK | TX18_COHERENT_INT_MASK | \ + TX17_COHERENT_INT_MASK | TX16_COHERENT_INT_MASK | \ + TX15_COHERENT_INT_MASK | TX14_COHERENT_INT_MASK | \ + TX13_COHERENT_INT_MASK | TX12_COHERENT_INT_MASK | \ + TX11_COHERENT_INT_MASK | TX10_COHERENT_INT_MASK | \ + TX9_COHERENT_INT_MASK | TX8_COHERENT_INT_MASK) #define REG_TX_IRQ_BASE(_n) ((_n) ? 0x0048 : 0x0050) @@ -691,6 +804,12 @@ #define REG_TRTCM_DATA_LOW(_n) ((_n) + 0x8) #define REG_TRTCM_DATA_HIGH(_n) ((_n) + 0xc) +#define RATE_LIMIT_PARAM_RW_MASK BIT(31) +#define RATE_LIMIT_PARAM_RW_DONE_MASK BIT(30) +#define RATE_LIMIT_PARAM_TYPE_MASK GENMASK(29, 28) +#define RATE_LIMIT_METER_GROUP_MASK GENMASK(27, 26) +#define RATE_LIMIT_PARAM_INDEX_MASK GENMASK(23, 16) + #define REG_TXWRR_MODE_CFG 0x1020 #define TWRR_WEIGHT_SCALE_MASK BIT(31) #define TWRR_WEIGHT_BASE_MASK BIT(3) diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 897720fdf5d8..7d9ee9867a40 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -1777,7 +1777,7 @@ static void ena_init_napi_in_range(struct ena_adapter *adapter, if (ENA_IS_XDP_INDEX(adapter, i)) napi_handler = ena_xdp_io_poll; - netif_napi_add(adapter->netdev, &napi->napi, napi_handler); + netif_napi_add_config(adapter->netdev, &napi->napi, napi_handler, i); if (!ENA_IS_XDP_INDEX(adapter, i)) napi->rx_ring = rx_ring; diff --git a/drivers/net/ethernet/amd/pds_core/adminq.c b/drivers/net/ethernet/amd/pds_core/adminq.c index 506f682d15c1..097bb092bdb8 100644 --- a/drivers/net/ethernet/amd/pds_core/adminq.c +++ b/drivers/net/ethernet/amd/pds_core/adminq.c @@ -225,7 +225,7 @@ int pdsc_adminq_post(struct pdsc *pdsc, union pds_core_adminq_comp *comp, bool fast_poll) { - unsigned long poll_interval = 1; + unsigned long poll_interval = 200; unsigned long poll_jiffies; unsigned long time_limit; unsigned long time_start; @@ -252,7 +252,7 @@ int pdsc_adminq_post(struct pdsc *pdsc, time_limit = time_start + HZ * pdsc->devcmd_timeout; do { /* Timeslice the actual wait to catch IO errors etc early */ - poll_jiffies = msecs_to_jiffies(poll_interval); 
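Editor's note on the airoha_regs.h hunks above (illustrative, not part of any patch in this series): the new INT_RX0_MASK()/INT_RX1_MASK()/INT_RX2_MASK()/INT_RX3_MASK() helpers translate a per-ring bitmap into the bit layout of the QDMA_CSR_INT_ENABLE registers, where the NO_CPU_DSCP and DONE bits for the same RX ring sit in different halves of the word. A minimal userspace sketch follows, assuming bit q of the argument selects RX ring q; the mask values are copied from the defines above, everything else is made up for the demo.

/* Standalone check of the INT_RX1/INT_RX2 remapping (illustrative only). */
#include <stdio.h>

#define RX_NO_CPU_DSCP_LOW_INT_MASK	0xffff0000u	/* INT_ENABLE2 bits 31..16 */
#define RX_DONE_LOW_INT_MASK		0x0000ffffu	/* INT_ENABLE2 bits 15..0  */
#define RX_NO_CPU_DSCP_HIGH_INT_MASK	0xffff0000u	/* INT_ENABLE3 bits 31..16 */
#define RX_DONE_HIGH_INT_MASK		0x0000ffffu	/* INT_ENABLE3 bits 15..0  */
#define RX_NO_CPU_DSCP_LOW_OFFSET	16		/* __ffs(low DSCP mask)    */
#define RX_DONE_HIGH_OFFSET		16		/* fls(high DONE mask)     */

#define INT_RX1_MASK(n) \
	((((n) << RX_NO_CPU_DSCP_LOW_OFFSET) & RX_NO_CPU_DSCP_LOW_INT_MASK) | \
	 (RX_DONE_LOW_INT_MASK & (n)))
#define INT_RX2_MASK(n) \
	((RX_NO_CPU_DSCP_HIGH_INT_MASK & (n)) | \
	 (((n) >> RX_DONE_HIGH_OFFSET) & RX_DONE_HIGH_INT_MASK))

int main(void)
{
	/* RX ring 3 lives in INT_ENABLE2: DONE bit 3, NO_CPU_DSCP bit 19 */
	printf("ring 3  -> ENABLE2 %08x\n", INT_RX1_MASK(1u << 3));
	/* RX ring 16 lives in INT_ENABLE3: DONE bit 0, NO_CPU_DSCP bit 16 */
	printf("ring 16 -> ENABLE3 %08x\n", INT_RX2_MASK(1u << 16));
	return 0;
}

Compiled with any C compiler this prints 00080008 for ring 3 and 00010001 for ring 16, i.e. each ring's DONE and NO_CPU_DSCP bits in their respective enable registers.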
+ poll_jiffies = usecs_to_jiffies(poll_interval); remaining = wait_for_completion_timeout(wc, poll_jiffies); if (remaining) break; diff --git a/drivers/net/ethernet/amd/pds_core/core.c b/drivers/net/ethernet/amd/pds_core/core.c index 9512aa4083f0..076dfe2910c7 100644 --- a/drivers/net/ethernet/amd/pds_core/core.c +++ b/drivers/net/ethernet/amd/pds_core/core.c @@ -402,6 +402,7 @@ err_out_uninit: static struct pdsc_viftype pdsc_viftype_defaults[] = { [PDS_DEV_TYPE_FWCTL] = { .name = PDS_DEV_TYPE_FWCTL_STR, + .enabled = true, .vif_id = PDS_DEV_TYPE_FWCTL, .dl_id = -1 }, [PDS_DEV_TYPE_VDPA] = { .name = PDS_DEV_TYPE_VDPA_STR, @@ -414,7 +415,8 @@ static int pdsc_viftypes_init(struct pdsc *pdsc) { enum pds_core_vif_types vt; - pdsc->viftype_status = kzalloc(sizeof(pdsc_viftype_defaults), + pdsc->viftype_status = kcalloc(ARRAY_SIZE(pdsc_viftype_defaults), + sizeof(*pdsc->viftype_status), GFP_KERNEL); if (!pdsc->viftype_status) return -ENOMEM; @@ -431,9 +433,6 @@ static int pdsc_viftypes_init(struct pdsc *pdsc) /* See what the Core device has for support */ vt_support = !!le16_to_cpu(pdsc->dev_ident.vif_types[vt]); - if (vt == PDS_DEV_TYPE_FWCTL) - pdsc->viftype_status[vt].enabled = true; - dev_dbg(pdsc->dev, "VIF %s is %ssupported\n", pdsc->viftype_status[vt].name, vt_support ? "" : "not "); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h index 3b70f6737633..e1296cbf4ff3 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h @@ -1,117 +1,8 @@ +// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause) /* - * AMD 10Gb Ethernet driver - * - * This file is available to you under your choice of the following two - * licenses: - * - * License 1: GPLv2 - * - * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. - * - * This file is free software; you may copy, redistribute and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or (at - * your option) any later version. - * - * This file is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. - * - * This file incorporates work covered by the following copyright and - * permission notice: - * The Synopsys DWC ETHER XGMAC Software Driver and documentation - * (hereinafter "Software") is an unsupported proprietary work of Synopsys, - * Inc. unless otherwise expressly agreed to in writing between Synopsys - * and you. - * - * The Software IS NOT an item of Licensed Software or Licensed Product - * under any End User Software License Agreement or Agreement for Licensed - * Product with Synopsys or any supplement thereto. 
Permission is hereby - * granted, free of charge, to any person obtaining a copy of this software - * annotated with this license and the Software, to deal in the Software - * without restriction, including without limitation the rights to use, - * copy, modify, merge, publish, distribute, sublicense, and/or sell copies - * of the Software, and to permit persons to whom the Software is furnished - * to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" - * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A - * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF - * THE POSSIBILITY OF SUCH DAMAGE. - * - * - * License 2: Modified BSD - * - * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Advanced Micro Devices, Inc. nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * This file incorporates work covered by the following copyright and - * permission notice: - * The Synopsys DWC ETHER XGMAC Software Driver and documentation - * (hereinafter "Software") is an unsupported proprietary work of Synopsys, - * Inc. unless otherwise expressly agreed to in writing between Synopsys - * and you. - * - * The Software IS NOT an item of Licensed Software or Licensed Product - * under any End User Software License Agreement or Agreement for Licensed - * Product with Synopsys or any supplement thereto. 
Permission is hereby - * granted, free of charge, to any person obtaining a copy of this software - * annotated with this license and the Software, to deal in the Software - * without restriction, including without limitation the rights to use, - * copy, modify, merge, publish, distribute, sublicense, and/or sell copies - * of the Software, and to permit persons to whom the Software is furnished - * to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" - * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A - * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF - * THE POSSIBILITY OF SUCH DAMAGE. + * Copyright (c) 2014-2025, Advanced Micro Devices, Inc. + * Copyright (c) 2014, Synopsys, Inc. + * All rights reserved */ #ifndef __XGBE_COMMON_H__ @@ -900,6 +791,11 @@ #define PCS_V2_RV_WINDOW_SELECT 0x1064 #define PCS_V2_YC_WINDOW_DEF 0x18060 #define PCS_V2_YC_WINDOW_SELECT 0x18064 +#define PCS_V3_RN_WINDOW_DEF 0xf8078 +#define PCS_V3_RN_WINDOW_SELECT 0xf807c + +#define PCS_RN_SMN_BASE_ADDR 0x11e00000 +#define PCS_RN_PORT_ADDR_SIZE 0x100000 /* PCS register entry bit positions and sizes */ #define PCS_V2_WINDOW_DEF_OFFSET_INDEX 6 diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c index c68ace804e37..1474df5544fa 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c @@ -1,117 +1,8 @@ +// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause) /* - * AMD 10Gb Ethernet driver - * - * This file is available to you under your choice of the following two - * licenses: - * - * License 1: GPLv2 - * - * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. - * - * This file is free software; you may copy, redistribute and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or (at - * your option) any later version. - * - * This file is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. - * - * This file incorporates work covered by the following copyright and - * permission notice: - * The Synopsys DWC ETHER XGMAC Software Driver and documentation - * (hereinafter "Software") is an unsupported proprietary work of Synopsys, - * Inc. unless otherwise expressly agreed to in writing between Synopsys - * and you. 
- * - * The Software IS NOT an item of Licensed Software or Licensed Product - * under any End User Software License Agreement or Agreement for Licensed - * Product with Synopsys or any supplement thereto. Permission is hereby - * granted, free of charge, to any person obtaining a copy of this software - * annotated with this license and the Software, to deal in the Software - * without restriction, including without limitation the rights to use, - * copy, modify, merge, publish, distribute, sublicense, and/or sell copies - * of the Software, and to permit persons to whom the Software is furnished - * to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" - * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A - * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF - * THE POSSIBILITY OF SUCH DAMAGE. - * - * - * License 2: Modified BSD - * - * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Advanced Micro Devices, Inc. nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * This file incorporates work covered by the following copyright and - * permission notice: - * The Synopsys DWC ETHER XGMAC Software Driver and documentation - * (hereinafter "Software") is an unsupported proprietary work of Synopsys, - * Inc. unless otherwise expressly agreed to in writing between Synopsys - * and you. 
- * - * The Software IS NOT an item of Licensed Software or Licensed Product - * under any End User Software License Agreement or Agreement for Licensed - * Product with Synopsys or any supplement thereto. Permission is hereby - * granted, free of charge, to any person obtaining a copy of this software - * annotated with this license and the Software, to deal in the Software - * without restriction, including without limitation the rights to use, - * copy, modify, merge, publish, distribute, sublicense, and/or sell copies - * of the Software, and to permit persons to whom the Software is furnished - * to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" - * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A - * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF - * THE POSSIBILITY OF SUCH DAMAGE. + * Copyright (c) 2014-2025, Advanced Micro Devices, Inc. + * Copyright (c) 2014, Synopsys, Inc. + * All rights reserved */ #include <linux/netdevice.h> diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c index b35808d3d07f..d9157c4acde9 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c @@ -1,117 +1,8 @@ +// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause) /* - * AMD 10Gb Ethernet driver - * - * This file is available to you under your choice of the following two - * licenses: - * - * License 1: GPLv2 - * - * Copyright (c) 2014 Advanced Micro Devices, Inc. - * - * This file is free software; you may copy, redistribute and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or (at - * your option) any later version. - * - * This file is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. - * - * This file incorporates work covered by the following copyright and - * permission notice: - * The Synopsys DWC ETHER XGMAC Software Driver and documentation - * (hereinafter "Software") is an unsupported proprietary work of Synopsys, - * Inc. unless otherwise expressly agreed to in writing between Synopsys - * and you. - * - * The Software IS NOT an item of Licensed Software or Licensed Product - * under any End User Software License Agreement or Agreement for Licensed - * Product with Synopsys or any supplement thereto. 
Permission is hereby - * granted, free of charge, to any person obtaining a copy of this software - * annotated with this license and the Software, to deal in the Software - * without restriction, including without limitation the rights to use, - * copy, modify, merge, publish, distribute, sublicense, and/or sell copies - * of the Software, and to permit persons to whom the Software is furnished - * to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" - * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A - * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF - * THE POSSIBILITY OF SUCH DAMAGE. - * - * - * License 2: Modified BSD - * - * Copyright (c) 2014 Advanced Micro Devices, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Advanced Micro Devices, Inc. nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * This file incorporates work covered by the following copyright and - * permission notice: - * The Synopsys DWC ETHER XGMAC Software Driver and documentation - * (hereinafter "Software") is an unsupported proprietary work of Synopsys, - * Inc. unless otherwise expressly agreed to in writing between Synopsys - * and you. - * - * The Software IS NOT an item of Licensed Software or Licensed Product - * under any End User Software License Agreement or Agreement for Licensed - * Product with Synopsys or any supplement thereto. 
Permission is hereby - * granted, free of charge, to any person obtaining a copy of this software - * annotated with this license and the Software, to deal in the Software - * without restriction, including without limitation the rights to use, - * copy, modify, merge, publish, distribute, sublicense, and/or sell copies - * of the Software, and to permit persons to whom the Software is furnished - * to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" - * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A - * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF - * THE POSSIBILITY OF SUCH DAMAGE. + * Copyright (c) 2014-2025, Advanced Micro Devices, Inc. + * Copyright (c) 2014, Synopsys, Inc. + * All rights reserved */ #include <linux/debugfs.h> diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c index d41b58fad37b..7c8a19988a52 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c @@ -1,117 +1,8 @@ +// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause) /* - * AMD 10Gb Ethernet driver - * - * This file is available to you under your choice of the following two - * licenses: - * - * License 1: GPLv2 - * - * Copyright (c) 2014 Advanced Micro Devices, Inc. - * - * This file is free software; you may copy, redistribute and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or (at - * your option) any later version. - * - * This file is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. - * - * This file incorporates work covered by the following copyright and - * permission notice: - * The Synopsys DWC ETHER XGMAC Software Driver and documentation - * (hereinafter "Software") is an unsupported proprietary work of Synopsys, - * Inc. unless otherwise expressly agreed to in writing between Synopsys - * and you. - * - * The Software IS NOT an item of Licensed Software or Licensed Product - * under any End User Software License Agreement or Agreement for Licensed - * Product with Synopsys or any supplement thereto. 
Permission is hereby - * granted, free of charge, to any person obtaining a copy of this software - * annotated with this license and the Software, to deal in the Software - * without restriction, including without limitation the rights to use, - * copy, modify, merge, publish, distribute, sublicense, and/or sell copies - * of the Software, and to permit persons to whom the Software is furnished - * to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" - * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A - * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF - * THE POSSIBILITY OF SUCH DAMAGE. - * - * - * License 2: Modified BSD - * - * Copyright (c) 2014 Advanced Micro Devices, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Advanced Micro Devices, Inc. nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * This file incorporates work covered by the following copyright and - * permission notice: - * The Synopsys DWC ETHER XGMAC Software Driver and documentation - * (hereinafter "Software") is an unsupported proprietary work of Synopsys, - * Inc. unless otherwise expressly agreed to in writing between Synopsys - * and you. - * - * The Software IS NOT an item of Licensed Software or Licensed Product - * under any End User Software License Agreement or Agreement for Licensed - * Product with Synopsys or any supplement thereto. 
Permission is hereby - * granted, free of charge, to any person obtaining a copy of this software - * annotated with this license and the Software, to deal in the Software - * without restriction, including without limitation the rights to use, - * copy, modify, merge, publish, distribute, sublicense, and/or sell copies - * of the Software, and to permit persons to whom the Software is furnished - * to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" - * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A - * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF - * THE POSSIBILITY OF SUCH DAMAGE. + * Copyright (c) 2014-2025, Advanced Micro Devices, Inc. + * Copyright (c) 2014, Synopsys, Inc. + * All rights reserved */ #include "xgbe.h" diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index f1b0fb02b3cd..466b5f6e5578 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -1,117 +1,8 @@ +// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause) /* - * AMD 10Gb Ethernet driver - * - * This file is available to you under your choice of the following two - * licenses: - * - * License 1: GPLv2 - * - * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. - * - * This file is free software; you may copy, redistribute and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or (at - * your option) any later version. - * - * This file is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. - * - * This file incorporates work covered by the following copyright and - * permission notice: - * The Synopsys DWC ETHER XGMAC Software Driver and documentation - * (hereinafter "Software") is an unsupported proprietary work of Synopsys, - * Inc. unless otherwise expressly agreed to in writing between Synopsys - * and you. - * - * The Software IS NOT an item of Licensed Software or Licensed Product - * under any End User Software License Agreement or Agreement for Licensed - * Product with Synopsys or any supplement thereto. 
Permission is hereby - * granted, free of charge, to any person obtaining a copy of this software - * annotated with this license and the Software, to deal in the Software - * without restriction, including without limitation the rights to use, - * copy, modify, merge, publish, distribute, sublicense, and/or sell copies - * of the Software, and to permit persons to whom the Software is furnished - * to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" - * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A - * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF - * THE POSSIBILITY OF SUCH DAMAGE. - * - * - * License 2: Modified BSD - * - * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Advanced Micro Devices, Inc. nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * This file incorporates work covered by the following copyright and - * permission notice: - * The Synopsys DWC ETHER XGMAC Software Driver and documentation - * (hereinafter "Software") is an unsupported proprietary work of Synopsys, - * Inc. unless otherwise expressly agreed to in writing between Synopsys - * and you. - * - * The Software IS NOT an item of Licensed Software or Licensed Product - * under any End User Software License Agreement or Agreement for Licensed - * Product with Synopsys or any supplement thereto. 
Permission is hereby - * granted, free of charge, to any person obtaining a copy of this software - * annotated with this license and the Software, to deal in the Software - * without restriction, including without limitation the rights to use, - * copy, modify, merge, publish, distribute, sublicense, and/or sell copies - * of the Software, and to permit persons to whom the Software is furnished - * to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" - * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A - * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF - * THE POSSIBILITY OF SUCH DAMAGE. + * Copyright (c) 2014-2025, Advanced Micro Devices, Inc. + * Copyright (c) 2014, Synopsys, Inc. + * All rights reserved */ #include <linux/phy.h> @@ -120,9 +11,11 @@ #include <linux/bitrev.h> #include <linux/crc32.h> #include <linux/crc32poly.h> +#include <linux/pci.h> #include "xgbe.h" #include "xgbe-common.h" +#include "xgbe-smn.h" static inline unsigned int xgbe_get_max_frame(struct xgbe_prv_data *pdata) { @@ -1162,18 +1055,19 @@ static int xgbe_set_gpio(struct xgbe_prv_data *pdata, unsigned int gpio) return 0; } -static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad, - int mmd_reg) +static unsigned int xgbe_get_mmd_address(struct xgbe_prv_data *pdata, + int mmd_reg) { - unsigned long flags; - unsigned int mmd_address, index, offset; - int mmd_data; - - if (mmd_reg & XGBE_ADDR_C45) - mmd_address = mmd_reg & ~XGBE_ADDR_C45; - else - mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff); + return (mmd_reg & XGBE_ADDR_C45) ? + mmd_reg & ~XGBE_ADDR_C45 : + (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff); +} +static void xgbe_get_pcs_index_and_offset(struct xgbe_prv_data *pdata, + unsigned int mmd_address, + unsigned int *index, + unsigned int *offset) +{ /* The PCS registers are accessed using mmio. The underlying * management interface uses indirect addressing to access the MMD * register sets. This requires accessing of the PCS register in two @@ -1184,8 +1078,98 @@ static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad, * offset 1 bit and reading 16 bits of data. 
*/ mmd_address <<= 1; - index = mmd_address & ~pdata->xpcs_window_mask; - offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask); + *index = mmd_address & ~pdata->xpcs_window_mask; + *offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask); +} + +static int xgbe_read_mmd_regs_v3(struct xgbe_prv_data *pdata, int prtad, + int mmd_reg) +{ + unsigned int mmd_address, index, offset; + u32 smn_address; + int mmd_data; + int ret; + + mmd_address = xgbe_get_mmd_address(pdata, mmd_reg); + + xgbe_get_pcs_index_and_offset(pdata, mmd_address, &index, &offset); + + smn_address = pdata->smn_base + pdata->xpcs_window_sel_reg; + ret = amd_smn_write(0, smn_address, index); + if (ret) + return ret; + + ret = amd_smn_read(0, pdata->smn_base + offset, &mmd_data); + if (ret) + return ret; + + mmd_data = (offset % 4) ? FIELD_GET(XGBE_GEN_HI_MASK, mmd_data) : + FIELD_GET(XGBE_GEN_LO_MASK, mmd_data); + + return mmd_data; +} + +static void xgbe_write_mmd_regs_v3(struct xgbe_prv_data *pdata, int prtad, + int mmd_reg, int mmd_data) +{ + unsigned int pci_mmd_data, hi_mask, lo_mask; + unsigned int mmd_address, index, offset; + struct pci_dev *dev; + u32 smn_address; + int ret; + + dev = pdata->pcidev; + mmd_address = xgbe_get_mmd_address(pdata, mmd_reg); + + xgbe_get_pcs_index_and_offset(pdata, mmd_address, &index, &offset); + + smn_address = pdata->smn_base + pdata->xpcs_window_sel_reg; + ret = amd_smn_write(0, smn_address, index); + if (ret) { + pci_err(dev, "Failed to write data 0x%x\n", index); + return; + } + + ret = amd_smn_read(0, pdata->smn_base + offset, &pci_mmd_data); + if (ret) { + pci_err(dev, "Failed to read data\n"); + return; + } + + if (offset % 4) { + hi_mask = FIELD_PREP(XGBE_GEN_HI_MASK, mmd_data); + lo_mask = FIELD_GET(XGBE_GEN_LO_MASK, pci_mmd_data); + } else { + hi_mask = FIELD_PREP(XGBE_GEN_HI_MASK, + FIELD_GET(XGBE_GEN_HI_MASK, pci_mmd_data)); + lo_mask = FIELD_GET(XGBE_GEN_LO_MASK, mmd_data); + } + + pci_mmd_data = hi_mask | lo_mask; + + ret = amd_smn_write(0, smn_address, index); + if (ret) { + pci_err(dev, "Failed to write data 0x%x\n", index); + return; + } + + ret = amd_smn_write(0, (pdata->smn_base + offset), pci_mmd_data); + if (ret) { + pci_err(dev, "Failed to write data 0x%x\n", pci_mmd_data); + return; + } +} + +static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad, + int mmd_reg) +{ + unsigned int mmd_address, index, offset; + unsigned long flags; + int mmd_data; + + mmd_address = xgbe_get_mmd_address(pdata, mmd_reg); + + xgbe_get_pcs_index_and_offset(pdata, mmd_address, &index, &offset); spin_lock_irqsave(&pdata->xpcs_lock, flags); XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index); @@ -1201,23 +1185,9 @@ static void xgbe_write_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad, unsigned long flags; unsigned int mmd_address, index, offset; - if (mmd_reg & XGBE_ADDR_C45) - mmd_address = mmd_reg & ~XGBE_ADDR_C45; - else - mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff); + mmd_address = xgbe_get_mmd_address(pdata, mmd_reg); - /* The PCS registers are accessed using mmio. The underlying - * management interface uses indirect addressing to access the MMD - * register sets. This requires accessing of the PCS register in two - * phases, an address phase and a data phase. - * - * The mmio interface is based on 16-bit offsets and values. All - * register offsets must therefore be adjusted by left shifting the - * offset 1 bit and writing 16 bits of data. 
- */ - mmd_address <<= 1; - index = mmd_address & ~pdata->xpcs_window_mask; - offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask); + xgbe_get_pcs_index_and_offset(pdata, mmd_address, &index, &offset); spin_lock_irqsave(&pdata->xpcs_lock, flags); XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index); @@ -1232,10 +1202,7 @@ static int xgbe_read_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad, unsigned int mmd_address; int mmd_data; - if (mmd_reg & XGBE_ADDR_C45) - mmd_address = mmd_reg & ~XGBE_ADDR_C45; - else - mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff); + mmd_address = xgbe_get_mmd_address(pdata, mmd_reg); /* The PCS registers are accessed using mmio. The underlying APB3 * management interface uses indirect addressing to access the MMD @@ -1260,10 +1227,7 @@ static void xgbe_write_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad, unsigned int mmd_address; unsigned long flags; - if (mmd_reg & XGBE_ADDR_C45) - mmd_address = mmd_reg & ~XGBE_ADDR_C45; - else - mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff); + mmd_address = xgbe_get_mmd_address(pdata, mmd_reg); /* The PCS registers are accessed using mmio. The underlying APB3 * management interface uses indirect addressing to access the MMD @@ -1290,6 +1254,9 @@ static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad, case XGBE_XPCS_ACCESS_V2: default: return xgbe_read_mmd_regs_v2(pdata, prtad, mmd_reg); + + case XGBE_XPCS_ACCESS_V3: + return xgbe_read_mmd_regs_v3(pdata, prtad, mmd_reg); } } @@ -1300,6 +1267,9 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad, case XGBE_XPCS_ACCESS_V1: return xgbe_write_mmd_regs_v1(pdata, prtad, mmd_reg, mmd_data); + case XGBE_XPCS_ACCESS_V3: + return xgbe_write_mmd_regs_v3(pdata, prtad, mmd_reg, mmd_data); + case XGBE_XPCS_ACCESS_V2: default: return xgbe_write_mmd_regs_v2(pdata, prtad, mmd_reg, mmd_data); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 8e09ad8fa022..76c328721fcd 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -1,117 +1,8 @@ +// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause) /* - * AMD 10Gb Ethernet driver - * - * This file is available to you under your choice of the following two - * licenses: - * - * License 1: GPLv2 - * - * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. - * - * This file is free software; you may copy, redistribute and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or (at - * your option) any later version. - * - * This file is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. - * - * This file incorporates work covered by the following copyright and - * permission notice: - * The Synopsys DWC ETHER XGMAC Software Driver and documentation - * (hereinafter "Software") is an unsupported proprietary work of Synopsys, - * Inc. unless otherwise expressly agreed to in writing between Synopsys - * and you. 
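Editor's note (illustrative, not part of the patch): the xgbe-dev.c refactor above funnels all access methods through xgbe_get_mmd_address() and xgbe_get_pcs_index_and_offset(), and the new XGBE_XPCS_ACCESS_V3 path then performs the same two window phases over SMN with amd_smn_write()/amd_smn_read(), selecting the upper or lower 16 bits of each 32-bit SMN word via offset % 4. Below is a standalone sketch of that arithmetic; the window mask, window base and hi/lo packing are assumed example values, only the shift-and-split logic mirrors the driver.

/* Sketch of the MMD address -> window index/offset math (illustrative only). */
#include <stdio.h>
#include <stdint.h>

#define XPCS_WINDOW_MASK	0x1fffu	/* assumed 8 KiB indirection window  */
#define XPCS_WINDOW		0x8000u	/* assumed MMIO offset of the window */

static void get_pcs_index_and_offset(unsigned int mmd_address,
				     unsigned int *index, unsigned int *offset)
{
	/* 16-bit registers: left shift by one to get a byte-granular address */
	mmd_address <<= 1;
	*index = mmd_address & ~XPCS_WINDOW_MASK;	/* programmed via window select */
	*offset = XPCS_WINDOW + (mmd_address & XPCS_WINDOW_MASK);
}

int main(void)
{
	unsigned int mmd = 3, reg = 0x0010;	/* arbitrary example values */
	unsigned int mmd_address = (mmd << 16) | (reg & 0xffff);
	unsigned int index, offset;
	uint32_t smn_word = 0xdead0123;		/* pretend SMN read result  */
	uint16_t val;

	get_pcs_index_and_offset(mmd_address, &index, &offset);

	/* A 32-bit SMN word carries two 16-bit registers; offset % 4 picks
	 * the upper or lower half, as in xgbe_read_mmd_regs_v3() above.
	 */
	val = (offset % 4) ? (smn_word >> 16) : (smn_word & 0xffff);

	printf("index=%#x offset=%#x value=%#06x\n", index, offset, val);
	return 0;
}

For the example register this prints index=0x60000 offset=0x8020 value=0x0123, i.e. the low half of the SMN word, matching the offset % 4 == 0 case.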
- * - * The Software IS NOT an item of Licensed Software or Licensed Product - * under any End User Software License Agreement or Agreement for Licensed - * Product with Synopsys or any supplement thereto. Permission is hereby - * granted, free of charge, to any person obtaining a copy of this software - * annotated with this license and the Software, to deal in the Software - * without restriction, including without limitation the rights to use, - * copy, modify, merge, publish, distribute, sublicense, and/or sell copies - * of the Software, and to permit persons to whom the Software is furnished - * to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" - * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A - * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF - * THE POSSIBILITY OF SUCH DAMAGE. - * - * - * License 2: Modified BSD - * - * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Advanced Micro Devices, Inc. nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * This file incorporates work covered by the following copyright and - * permission notice: - * The Synopsys DWC ETHER XGMAC Software Driver and documentation - * (hereinafter "Software") is an unsupported proprietary work of Synopsys, - * Inc. unless otherwise expressly agreed to in writing between Synopsys - * and you. 
- * - * The Software IS NOT an item of Licensed Software or Licensed Product - * under any End User Software License Agreement or Agreement for Licensed - * Product with Synopsys or any supplement thereto. Permission is hereby - * granted, free of charge, to any person obtaining a copy of this software - * annotated with this license and the Software, to deal in the Software - * without restriction, including without limitation the rights to use, - * copy, modify, merge, publish, distribute, sublicense, and/or sell copies - * of the Software, and to permit persons to whom the Software is furnished - * to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" - * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A - * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF - * THE POSSIBILITY OF SUCH DAMAGE. + * Copyright (c) 2014-2025, Advanced Micro Devices, Inc. + * Copyright (c) 2014, Synopsys, Inc. + * All rights reserved */ #include <linux/module.h> diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c index 4431ab1c18b3..be0d2c7d08dc 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c @@ -1,117 +1,8 @@ +// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause) /* - * AMD 10Gb Ethernet driver - * - * This file is available to you under your choice of the following two - * licenses: - * - * License 1: GPLv2 - * - * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. - * - * This file is free software; you may copy, redistribute and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or (at - * your option) any later version. - * - * This file is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. - * - * This file incorporates work covered by the following copyright and - * permission notice: - * The Synopsys DWC ETHER XGMAC Software Driver and documentation - * (hereinafter "Software") is an unsupported proprietary work of Synopsys, - * Inc. unless otherwise expressly agreed to in writing between Synopsys - * and you. - * - * The Software IS NOT an item of Licensed Software or Licensed Product - * under any End User Software License Agreement or Agreement for Licensed - * Product with Synopsys or any supplement thereto. 
Permission is hereby - * granted, free of charge, to any person obtaining a copy of this software - * annotated with this license and the Software, to deal in the Software - * without restriction, including without limitation the rights to use, - * copy, modify, merge, publish, distribute, sublicense, and/or sell copies - * of the Software, and to permit persons to whom the Software is furnished - * to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" - * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A - * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF - * THE POSSIBILITY OF SUCH DAMAGE. - * - * - * License 2: Modified BSD - * - * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Advanced Micro Devices, Inc. nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * This file incorporates work covered by the following copyright and - * permission notice: - * The Synopsys DWC ETHER XGMAC Software Driver and documentation - * (hereinafter "Software") is an unsupported proprietary work of Synopsys, - * Inc. unless otherwise expressly agreed to in writing between Synopsys - * and you. - * - * The Software IS NOT an item of Licensed Software or Licensed Product - * under any End User Software License Agreement or Agreement for Licensed - * Product with Synopsys or any supplement thereto. 
Permission is hereby - * granted, free of charge, to any person obtaining a copy of this software - * annotated with this license and the Software, to deal in the Software - * without restriction, including without limitation the rights to use, - * copy, modify, merge, publish, distribute, sublicense, and/or sell copies - * of the Software, and to permit persons to whom the Software is furnished - * to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" - * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A - * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF - * THE POSSIBILITY OF SUCH DAMAGE. + * Copyright (c) 2014-2025, Advanced Micro Devices, Inc. + * Copyright (c) 2014, Synopsys, Inc. + * All rights reserved */ #include <linux/spinlock.h> diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c index 7a833894f52a..d40011e8ddf2 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c @@ -1,117 +1,8 @@ +// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause) /* - * AMD 10Gb Ethernet driver - * - * This file is available to you under your choice of the following two - * licenses: - * - * License 1: GPLv2 - * - * Copyright (c) 2016 Advanced Micro Devices, Inc. - * - * This file is free software; you may copy, redistribute and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or (at - * your option) any later version. - * - * This file is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. - * - * This file incorporates work covered by the following copyright and - * permission notice: - * The Synopsys DWC ETHER XGMAC Software Driver and documentation - * (hereinafter "Software") is an unsupported proprietary work of Synopsys, - * Inc. unless otherwise expressly agreed to in writing between Synopsys - * and you. - * - * The Software IS NOT an item of Licensed Software or Licensed Product - * under any End User Software License Agreement or Agreement for Licensed - * Product with Synopsys or any supplement thereto. 
Permission is hereby - * granted, free of charge, to any person obtaining a copy of this software - * annotated with this license and the Software, to deal in the Software - * without restriction, including without limitation the rights to use, - * copy, modify, merge, publish, distribute, sublicense, and/or sell copies - * of the Software, and to permit persons to whom the Software is furnished - * to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" - * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A - * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF - * THE POSSIBILITY OF SUCH DAMAGE. - * - * - * License 2: Modified BSD - * - * Copyright (c) 2016 Advanced Micro Devices, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Advanced Micro Devices, Inc. nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * This file incorporates work covered by the following copyright and - * permission notice: - * The Synopsys DWC ETHER XGMAC Software Driver and documentation - * (hereinafter "Software") is an unsupported proprietary work of Synopsys, - * Inc. unless otherwise expressly agreed to in writing between Synopsys - * and you. - * - * The Software IS NOT an item of Licensed Software or Licensed Product - * under any End User Software License Agreement or Agreement for Licensed - * Product with Synopsys or any supplement thereto. 
Permission is hereby - * granted, free of charge, to any person obtaining a copy of this software - * annotated with this license and the Software, to deal in the Software - * without restriction, including without limitation the rights to use, - * copy, modify, merge, publish, distribute, sublicense, and/or sell copies - * of the Software, and to permit persons to whom the Software is furnished - * to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" - * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A - * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF - * THE POSSIBILITY OF SUCH DAMAGE. + * Copyright (c) 2014-2025, Advanced Micro Devices, Inc. + * Copyright (c) 2014, Synopsys, Inc. + * All rights reserved */ #include <linux/module.h> diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c index 0e8698928e4d..4ebdd123c435 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c @@ -1,117 +1,8 @@ +// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause) /* - * AMD 10Gb Ethernet driver - * - * This file is available to you under your choice of the following two - * licenses: - * - * License 1: GPLv2 - * - * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. - * - * This file is free software; you may copy, redistribute and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or (at - * your option) any later version. - * - * This file is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. - * - * This file incorporates work covered by the following copyright and - * permission notice: - * The Synopsys DWC ETHER XGMAC Software Driver and documentation - * (hereinafter "Software") is an unsupported proprietary work of Synopsys, - * Inc. unless otherwise expressly agreed to in writing between Synopsys - * and you. - * - * The Software IS NOT an item of Licensed Software or Licensed Product - * under any End User Software License Agreement or Agreement for Licensed - * Product with Synopsys or any supplement thereto. 
Permission is hereby - * granted, free of charge, to any person obtaining a copy of this software - * annotated with this license and the Software, to deal in the Software - * without restriction, including without limitation the rights to use, - * copy, modify, merge, publish, distribute, sublicense, and/or sell copies - * of the Software, and to permit persons to whom the Software is furnished - * to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" - * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A - * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF - * THE POSSIBILITY OF SUCH DAMAGE. - * - * - * License 2: Modified BSD - * - * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Advanced Micro Devices, Inc. nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * This file incorporates work covered by the following copyright and - * permission notice: - * The Synopsys DWC ETHER XGMAC Software Driver and documentation - * (hereinafter "Software") is an unsupported proprietary work of Synopsys, - * Inc. unless otherwise expressly agreed to in writing between Synopsys - * and you. - * - * The Software IS NOT an item of Licensed Software or Licensed Product - * under any End User Software License Agreement or Agreement for Licensed - * Product with Synopsys or any supplement thereto. 
Permission is hereby - * granted, free of charge, to any person obtaining a copy of this software - * annotated with this license and the Software, to deal in the Software - * without restriction, including without limitation the rights to use, - * copy, modify, merge, publish, distribute, sublicense, and/or sell copies - * of the Software, and to permit persons to whom the Software is furnished - * to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" - * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A - * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF - * THE POSSIBILITY OF SUCH DAMAGE. + * Copyright (c) 2014-2025, Advanced Micro Devices, Inc. + * Copyright (c) 2014, Synopsys, Inc. + * All rights reserved */ #include <linux/module.h> diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c index 07f4f3418d01..71449edbb76d 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c @@ -1,117 +1,8 @@ +// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause) /* - * AMD 10Gb Ethernet driver - * - * This file is available to you under your choice of the following two - * licenses: - * - * License 1: GPLv2 - * - * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. - * - * This file is free software; you may copy, redistribute and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or (at - * your option) any later version. - * - * This file is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. - * - * This file incorporates work covered by the following copyright and - * permission notice: - * The Synopsys DWC ETHER XGMAC Software Driver and documentation - * (hereinafter "Software") is an unsupported proprietary work of Synopsys, - * Inc. unless otherwise expressly agreed to in writing between Synopsys - * and you. - * - * The Software IS NOT an item of Licensed Software or Licensed Product - * under any End User Software License Agreement or Agreement for Licensed - * Product with Synopsys or any supplement thereto. 
Permission is hereby - * granted, free of charge, to any person obtaining a copy of this software - * annotated with this license and the Software, to deal in the Software - * without restriction, including without limitation the rights to use, - * copy, modify, merge, publish, distribute, sublicense, and/or sell copies - * of the Software, and to permit persons to whom the Software is furnished - * to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" - * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A - * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF - * THE POSSIBILITY OF SUCH DAMAGE. - * - * - * License 2: Modified BSD - * - * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Advanced Micro Devices, Inc. nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * This file incorporates work covered by the following copyright and - * permission notice: - * The Synopsys DWC ETHER XGMAC Software Driver and documentation - * (hereinafter "Software") is an unsupported proprietary work of Synopsys, - * Inc. unless otherwise expressly agreed to in writing between Synopsys - * and you. - * - * The Software IS NOT an item of Licensed Software or Licensed Product - * under any End User Software License Agreement or Agreement for Licensed - * Product with Synopsys or any supplement thereto. 
Permission is hereby - * granted, free of charge, to any person obtaining a copy of this software - * annotated with this license and the Software, to deal in the Software - * without restriction, including without limitation the rights to use, - * copy, modify, merge, publish, distribute, sublicense, and/or sell copies - * of the Software, and to permit persons to whom the Software is furnished - * to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" - * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A - * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF - * THE POSSIBILITY OF SUCH DAMAGE. + * Copyright (c) 2014-2025, Advanced Micro Devices, Inc. + * Copyright (c) 2014, Synopsys, Inc. + * All rights reserved */ #include <linux/interrupt.h> diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c index c636999a6a84..097ec5e4f261 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c @@ -1,123 +1,15 @@ +// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause) /* - * AMD 10Gb Ethernet driver - * - * This file is available to you under your choice of the following two - * licenses: - * - * License 1: GPLv2 - * - * Copyright (c) 2016 Advanced Micro Devices, Inc. - * - * This file is free software; you may copy, redistribute and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or (at - * your option) any later version. - * - * This file is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. - * - * This file incorporates work covered by the following copyright and - * permission notice: - * The Synopsys DWC ETHER XGMAC Software Driver and documentation - * (hereinafter "Software") is an unsupported proprietary work of Synopsys, - * Inc. unless otherwise expressly agreed to in writing between Synopsys - * and you. - * - * The Software IS NOT an item of Licensed Software or Licensed Product - * under any End User Software License Agreement or Agreement for Licensed - * Product with Synopsys or any supplement thereto. 
Permission is hereby - * granted, free of charge, to any person obtaining a copy of this software - * annotated with this license and the Software, to deal in the Software - * without restriction, including without limitation the rights to use, - * copy, modify, merge, publish, distribute, sublicense, and/or sell copies - * of the Software, and to permit persons to whom the Software is furnished - * to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" - * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A - * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF - * THE POSSIBILITY OF SUCH DAMAGE. - * - * - * License 2: Modified BSD - * - * Copyright (c) 2016 Advanced Micro Devices, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Advanced Micro Devices, Inc. nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * This file incorporates work covered by the following copyright and - * permission notice: - * The Synopsys DWC ETHER XGMAC Software Driver and documentation - * (hereinafter "Software") is an unsupported proprietary work of Synopsys, - * Inc. unless otherwise expressly agreed to in writing between Synopsys - * and you. - * - * The Software IS NOT an item of Licensed Software or Licensed Product - * under any End User Software License Agreement or Agreement for Licensed - * Product with Synopsys or any supplement thereto. 
Permission is hereby - * granted, free of charge, to any person obtaining a copy of this software - * annotated with this license and the Software, to deal in the Software - * without restriction, including without limitation the rights to use, - * copy, modify, merge, publish, distribute, sublicense, and/or sell copies - * of the Software, and to permit persons to whom the Software is furnished - * to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" - * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A - * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF - * THE POSSIBILITY OF SUCH DAMAGE. + * Copyright (c) 2014-2025, Advanced Micro Devices, Inc. + * Copyright (c) 2014, Synopsys, Inc. + * All rights reserved */ #include <linux/module.h> #include <linux/device.h> #include <linux/pci.h> #include <linux/log2.h> +#include "xgbe-smn.h" #include "xgbe.h" #include "xgbe-common.h" @@ -207,14 +99,14 @@ out: static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { - struct xgbe_prv_data *pdata; - struct device *dev = &pdev->dev; void __iomem * const *iomap_table; - struct pci_dev *rdev; + unsigned int port_addr_size, reg; + struct device *dev = &pdev->dev; + struct xgbe_prv_data *pdata; unsigned int ma_lo, ma_hi; - unsigned int reg; - int bar_mask; - int ret; + struct pci_dev *rdev; + int bar_mask, ret; + u32 address; pdata = xgbe_alloc_pdata(dev); if (IS_ERR(pdata)) { @@ -274,20 +166,31 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) /* Set the PCS indirect addressing definition registers */ rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0)); - if (rdev && - (rdev->vendor == PCI_VENDOR_ID_AMD) && (rdev->device == 0x15d0)) { - pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF; - pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT; - } else if (rdev && (rdev->vendor == PCI_VENDOR_ID_AMD) && - (rdev->device == 0x14b5)) { - pdata->xpcs_window_def_reg = PCS_V2_YC_WINDOW_DEF; - pdata->xpcs_window_sel_reg = PCS_V2_YC_WINDOW_SELECT; - - /* Yellow Carp devices do not need cdr workaround */ - pdata->vdata->an_cdr_workaround = 0; - - /* Yellow Carp devices do not need rrc */ - pdata->vdata->enable_rrc = 0; + if (rdev && rdev->vendor == PCI_VENDOR_ID_AMD) { + switch (rdev->device) { + case XGBE_RV_PCI_DEVICE_ID: + pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF; + pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT; + break; + case XGBE_YC_PCI_DEVICE_ID: + pdata->xpcs_window_def_reg = PCS_V2_YC_WINDOW_DEF; + pdata->xpcs_window_sel_reg = PCS_V2_YC_WINDOW_SELECT; + + /* Yellow Carp devices do not need cdr workaround */ + pdata->vdata->an_cdr_workaround = 0; + + /* Yellow Carp devices do not need rrc */ + pdata->vdata->enable_rrc = 0; + break; + case XGBE_RN_PCI_DEVICE_ID: + 
pdata->xpcs_window_def_reg = PCS_V3_RN_WINDOW_DEF; + pdata->xpcs_window_sel_reg = PCS_V3_RN_WINDOW_SELECT; + break; + default: + pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF; + pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT; + break; + } } else { pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF; pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT; @@ -295,7 +198,22 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) pci_dev_put(rdev); /* Configure the PCS indirect addressing support */ - reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg); + if (pdata->vdata->xpcs_access == XGBE_XPCS_ACCESS_V3) { + reg = XP_IOREAD(pdata, XP_PROP_0); + port_addr_size = PCS_RN_PORT_ADDR_SIZE * + XP_GET_BITS(reg, XP_PROP_0, PORT_ID); + pdata->smn_base = PCS_RN_SMN_BASE_ADDR + port_addr_size; + + address = pdata->smn_base + (pdata->xpcs_window_def_reg); + ret = amd_smn_read(0, address, &reg); + if (ret) { + pci_err(pdata->pcidev, "Failed to read data\n"); + goto err_pci_enable; + } + } else { + reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg); + } + pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET); pdata->xpcs_window <<= 6; pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE); @@ -473,6 +391,22 @@ static int __maybe_unused xgbe_pci_resume(struct device *dev) return ret; } +static struct xgbe_version_data xgbe_v3 = { + .init_function_ptrs_phy_impl = xgbe_init_function_ptrs_phy_v2, + .xpcs_access = XGBE_XPCS_ACCESS_V3, + .mmc_64bit = 1, + .tx_max_fifo_size = 65536, + .rx_max_fifo_size = 65536, + .tx_tstamp_workaround = 1, + .ecc_support = 1, + .i2c_support = 1, + .irq_reissue_support = 1, + .tx_desc_prefetch = 5, + .rx_desc_prefetch = 5, + .an_cdr_workaround = 0, + .enable_rrc = 0, +}; + static struct xgbe_version_data xgbe_v2a = { + .init_function_ptrs_phy_impl = xgbe_init_function_ptrs_phy_v2, .xpcs_access = XGBE_XPCS_ACCESS_V2, @@ -510,6 +444,8 @@ static const struct pci_device_id xgbe_pci_table[] = { .driver_data = (kernel_ulong_t)&xgbe_v2a }, { PCI_VDEVICE(AMD, 0x1459), .driver_data = (kernel_ulong_t)&xgbe_v2b }, + { PCI_VDEVICE(AMD, 0x1641), + .driver_data = (kernel_ulong_t)&xgbe_v3 }, /* Last entry must be zero */ { 0, } }; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c index d16eae415f72..2e6b8ffe785c 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c @@ -1,117 +1,8 @@ +// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause) /* - * AMD 10Gb Ethernet driver - * - * This file is available to you under your choice of the following two - * licenses: - * - * License 1: GPLv2 - * - * Copyright (c) 2016 Advanced Micro Devices, Inc. - * - * This file is free software; you may copy, redistribute and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or (at - * your option) any later version. - * - * This file is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. 
- * - * This file incorporates work covered by the following copyright and - * permission notice: - * The Synopsys DWC ETHER XGMAC Software Driver and documentation - * (hereinafter "Software") is an unsupported proprietary work of Synopsys, - * Inc. unless otherwise expressly agreed to in writing between Synopsys - * and you. - * - * The Software IS NOT an item of Licensed Software or Licensed Product - * under any End User Software License Agreement or Agreement for Licensed - * Product with Synopsys or any supplement thereto. Permission is hereby - * granted, free of charge, to any person obtaining a copy of this software - * annotated with this license and the Software, to deal in the Software - * without restriction, including without limitation the rights to use, - * copy, modify, merge, publish, distribute, sublicense, and/or sell copies - * of the Software, and to permit persons to whom the Software is furnished - * to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" - * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A - * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF - * THE POSSIBILITY OF SUCH DAMAGE. - * - * - * License 2: Modified BSD - * - * Copyright (c) 2016 Advanced Micro Devices, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Advanced Micro Devices, Inc. nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - * This file incorporates work covered by the following copyright and - * permission notice: - * The Synopsys DWC ETHER XGMAC Software Driver and documentation - * (hereinafter "Software") is an unsupported proprietary work of Synopsys, - * Inc. unless otherwise expressly agreed to in writing between Synopsys - * and you. - * - * The Software IS NOT an item of Licensed Software or Licensed Product - * under any End User Software License Agreement or Agreement for Licensed - * Product with Synopsys or any supplement thereto. Permission is hereby - * granted, free of charge, to any person obtaining a copy of this software - * annotated with this license and the Software, to deal in the Software - * without restriction, including without limitation the rights to use, - * copy, modify, merge, publish, distribute, sublicense, and/or sell copies - * of the Software, and to permit persons to whom the Software is furnished - * to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" - * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A - * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF - * THE POSSIBILITY OF SUCH DAMAGE. + * Copyright (c) 2014-2025, Advanced Micro Devices, Inc. + * Copyright (c) 2014, Synopsys, Inc. + * All rights reserved */ #include <linux/module.h> diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c index 268399dfcf22..7a4dfa4e19c7 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c @@ -1,117 +1,8 @@ +// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause) /* - * AMD 10Gb Ethernet driver - * - * This file is available to you under your choice of the following two - * licenses: - * - * License 1: GPLv2 - * - * Copyright (c) 2016 Advanced Micro Devices, Inc. - * - * This file is free software; you may copy, redistribute and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or (at - * your option) any later version. - * - * This file is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. - * - * This file incorporates work covered by the following copyright and - * permission notice: - * The Synopsys DWC ETHER XGMAC Software Driver and documentation - * (hereinafter "Software") is an unsupported proprietary work of Synopsys, - * Inc. 
unless otherwise expressly agreed to in writing between Synopsys - * and you. - * - * The Software IS NOT an item of Licensed Software or Licensed Product - * under any End User Software License Agreement or Agreement for Licensed - * Product with Synopsys or any supplement thereto. Permission is hereby - * granted, free of charge, to any person obtaining a copy of this software - * annotated with this license and the Software, to deal in the Software - * without restriction, including without limitation the rights to use, - * copy, modify, merge, publish, distribute, sublicense, and/or sell copies - * of the Software, and to permit persons to whom the Software is furnished - * to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" - * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A - * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF - * THE POSSIBILITY OF SUCH DAMAGE. - * - * - * License 2: Modified BSD - * - * Copyright (c) 2016 Advanced Micro Devices, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Advanced Micro Devices, Inc. nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * This file incorporates work covered by the following copyright and - * permission notice: - * The Synopsys DWC ETHER XGMAC Software Driver and documentation - * (hereinafter "Software") is an unsupported proprietary work of Synopsys, - * Inc. 
unless otherwise expressly agreed to in writing between Synopsys - * and you. - * - * The Software IS NOT an item of Licensed Software or Licensed Product - * under any End User Software License Agreement or Agreement for Licensed - * Product with Synopsys or any supplement thereto. Permission is hereby - * granted, free of charge, to any person obtaining a copy of this software - * annotated with this license and the Software, to deal in the Software - * without restriction, including without limitation the rights to use, - * copy, modify, merge, publish, distribute, sublicense, and/or sell copies - * of the Software, and to permit persons to whom the Software is furnished - * to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" - * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A - * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF - * THE POSSIBILITY OF SUCH DAMAGE. + * Copyright (c) 2014-2025, Advanced Micro Devices, Inc. + * Copyright (c) 2014, Synopsys, Inc. + * All rights reserved */ #include <linux/module.h> diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c index 4365bd62942c..47d53e59ccf6 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c @@ -1,117 +1,8 @@ +// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause) /* - * AMD 10Gb Ethernet driver - * - * This file is available to you under your choice of the following two - * licenses: - * - * License 1: GPLv2 - * - * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. - * - * This file is free software; you may copy, redistribute and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or (at - * your option) any later version. - * - * This file is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. - * - * This file incorporates work covered by the following copyright and - * permission notice: - * The Synopsys DWC ETHER XGMAC Software Driver and documentation - * (hereinafter "Software") is an unsupported proprietary work of Synopsys, - * Inc. unless otherwise expressly agreed to in writing between Synopsys - * and you. - * - * The Software IS NOT an item of Licensed Software or Licensed Product - * under any End User Software License Agreement or Agreement for Licensed - * Product with Synopsys or any supplement thereto. 
Permission is hereby - * granted, free of charge, to any person obtaining a copy of this software - * annotated with this license and the Software, to deal in the Software - * without restriction, including without limitation the rights to use, - * copy, modify, merge, publish, distribute, sublicense, and/or sell copies - * of the Software, and to permit persons to whom the Software is furnished - * to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" - * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A - * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF - * THE POSSIBILITY OF SUCH DAMAGE. - * - * - * License 2: Modified BSD - * - * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Advanced Micro Devices, Inc. nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * This file incorporates work covered by the following copyright and - * permission notice: - * The Synopsys DWC ETHER XGMAC Software Driver and documentation - * (hereinafter "Software") is an unsupported proprietary work of Synopsys, - * Inc. unless otherwise expressly agreed to in writing between Synopsys - * and you. - * - * The Software IS NOT an item of Licensed Software or Licensed Product - * under any End User Software License Agreement or Agreement for Licensed - * Product with Synopsys or any supplement thereto. 
Permission is hereby - * granted, free of charge, to any person obtaining a copy of this software - * annotated with this license and the Software, to deal in the Software - * without restriction, including without limitation the rights to use, - * copy, modify, merge, publish, distribute, sublicense, and/or sell copies - * of the Software, and to permit persons to whom the Software is furnished - * to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" - * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A - * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF - * THE POSSIBILITY OF SUCH DAMAGE. + * Copyright (c) 2014-2025, Advanced Micro Devices, Inc. + * Copyright (c) 2014, Synopsys, Inc. + * All rights reserved */ #include <linux/module.h> diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c index 7051bd7cf6dc..978c4dd01fa0 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c @@ -1,117 +1,8 @@ +// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause) /* - * AMD 10Gb Ethernet driver - * - * This file is available to you under your choice of the following two - * licenses: - * - * License 1: GPLv2 - * - * Copyright (c) 2014 Advanced Micro Devices, Inc. - * - * This file is free software; you may copy, redistribute and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or (at - * your option) any later version. - * - * This file is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. - * - * This file incorporates work covered by the following copyright and - * permission notice: - * The Synopsys DWC ETHER XGMAC Software Driver and documentation - * (hereinafter "Software") is an unsupported proprietary work of Synopsys, - * Inc. unless otherwise expressly agreed to in writing between Synopsys - * and you. - * - * The Software IS NOT an item of Licensed Software or Licensed Product - * under any End User Software License Agreement or Agreement for Licensed - * Product with Synopsys or any supplement thereto. 
Permission is hereby - * granted, free of charge, to any person obtaining a copy of this software - * annotated with this license and the Software, to deal in the Software - * without restriction, including without limitation the rights to use, - * copy, modify, merge, publish, distribute, sublicense, and/or sell copies - * of the Software, and to permit persons to whom the Software is furnished - * to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" - * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A - * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF - * THE POSSIBILITY OF SUCH DAMAGE. - * - * - * License 2: Modified BSD - * - * Copyright (c) 2014 Advanced Micro Devices, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Advanced Micro Devices, Inc. nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * This file incorporates work covered by the following copyright and - * permission notice: - * The Synopsys DWC ETHER XGMAC Software Driver and documentation - * (hereinafter "Software") is an unsupported proprietary work of Synopsys, - * Inc. unless otherwise expressly agreed to in writing between Synopsys - * and you. - * - * The Software IS NOT an item of Licensed Software or Licensed Product - * under any End User Software License Agreement or Agreement for Licensed - * Product with Synopsys or any supplement thereto. 
Permission is hereby - * granted, free of charge, to any person obtaining a copy of this software - * annotated with this license and the Software, to deal in the Software - * without restriction, including without limitation the rights to use, - * copy, modify, merge, publish, distribute, sublicense, and/or sell copies - * of the Software, and to permit persons to whom the Software is furnished - * to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" - * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A - * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF - * THE POSSIBILITY OF SUCH DAMAGE. + * Copyright (c) 2014-2025, Advanced Micro Devices, Inc. + * Copyright (c) 2014, Synopsys, Inc. + * All rights reserved */ #include <linux/clk.h> diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-smn.h b/drivers/net/ethernet/amd/xgbe/xgbe-smn.h new file mode 100644 index 000000000000..c6ae127ced03 --- /dev/null +++ b/drivers/net/ethernet/amd/xgbe/xgbe-smn.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause) */ +/* + * Copyright (c) 2014-2025, Advanced Micro Devices, Inc. + * Copyright (c) 2014, Synopsys, Inc. + * All rights reserved + * + * Author: Raju Rangoju <Raju.Rangoju@amd.com> + */ + +#ifndef __SMN_H__ +#define __SMN_H__ + +#ifdef CONFIG_AMD_NB + +#include <asm/amd/nb.h> + +#else + +static inline int amd_smn_write(u16 node, u32 address, u32 value) +{ + return -ENODEV; +} + +static inline int amd_smn_read(u16 node, u32 address, u32 *value) +{ + return -ENODEV; +} + +#endif +#endif diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index ed5d43c16d0e..6359bb87dc13 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -1,117 +1,8 @@ +// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause) /* - * AMD 10Gb Ethernet driver - * - * This file is available to you under your choice of the following two - * licenses: - * - * License 1: GPLv2 - * - * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. - * - * This file is free software; you may copy, redistribute and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or (at - * your option) any later version. - * - * This file is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. 
- * - * This file incorporates work covered by the following copyright and - * permission notice: - * The Synopsys DWC ETHER XGMAC Software Driver and documentation - * (hereinafter "Software") is an unsupported proprietary work of Synopsys, - * Inc. unless otherwise expressly agreed to in writing between Synopsys - * and you. - * - * The Software IS NOT an item of Licensed Software or Licensed Product - * under any End User Software License Agreement or Agreement for Licensed - * Product with Synopsys or any supplement thereto. Permission is hereby - * granted, free of charge, to any person obtaining a copy of this software - * annotated with this license and the Software, to deal in the Software - * without restriction, including without limitation the rights to use, - * copy, modify, merge, publish, distribute, sublicense, and/or sell copies - * of the Software, and to permit persons to whom the Software is furnished - * to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" - * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A - * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF - * THE POSSIBILITY OF SUCH DAMAGE. - * - * - * License 2: Modified BSD - * - * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Advanced Micro Devices, Inc. nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - * This file incorporates work covered by the following copyright and - * permission notice: - * The Synopsys DWC ETHER XGMAC Software Driver and documentation - * (hereinafter "Software") is an unsupported proprietary work of Synopsys, - * Inc. unless otherwise expressly agreed to in writing between Synopsys - * and you. - * - * The Software IS NOT an item of Licensed Software or Licensed Product - * under any End User Software License Agreement or Agreement for Licensed - * Product with Synopsys or any supplement thereto. Permission is hereby - * granted, free of charge, to any person obtaining a copy of this software - * annotated with this license and the Software, to deal in the Software - * without restriction, including without limitation the rights to use, - * copy, modify, merge, publish, distribute, sublicense, and/or sell copies - * of the Software, and to permit persons to whom the Software is furnished - * to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" - * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A - * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF - * THE POSSIBILITY OF SUCH DAMAGE. + * Copyright (c) 2014-2025, Advanced Micro Devices, Inc. + * Copyright (c) 2014, Synopsys, Inc. 
+ * All rights reserved */ #ifndef __XGBE_H__ @@ -347,6 +238,15 @@ (_src)->link_modes._sname, \ __ETHTOOL_LINK_MODE_MASK_NBITS) +/* XGBE PCI device id */ +#define XGBE_RV_PCI_DEVICE_ID 0x15d0 +#define XGBE_YC_PCI_DEVICE_ID 0x14b5 +#define XGBE_RN_PCI_DEVICE_ID 0x1630 + + /* Generic low and high masks */ +#define XGBE_GEN_HI_MASK GENMASK(31, 16) +#define XGBE_GEN_LO_MASK GENMASK(15, 0) + struct xgbe_prv_data; struct xgbe_packet_data { @@ -565,6 +465,7 @@ enum xgbe_speed { enum xgbe_xpcs_access { XGBE_XPCS_ACCESS_V1 = 0, XGBE_XPCS_ACCESS_V2, + XGBE_XPCS_ACCESS_V3, }; enum xgbe_an_mode { @@ -1060,6 +961,7 @@ struct xgbe_prv_data { struct device *dev; struct platform_device *phy_platdev; struct device *phy_dev; + unsigned int smn_base; /* Version related data */ struct xgbe_version_data *vdata; diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c index b9fdd61f1fdb..b50052c25a91 100644 --- a/drivers/net/ethernet/apple/bmac.c +++ b/drivers/net/ethernet/apple/bmac.c @@ -20,7 +20,6 @@ #include <linux/init.h> #include <linux/spinlock.h> #include <linux/crc32.h> -#include <linux/crc32poly.h> #include <linux/bitrev.h> #include <linux/ethtool.h> #include <linux/slab.h> @@ -796,59 +795,6 @@ static irqreturn_t bmac_txdma_intr(int irq, void *dev_id) } #ifndef SUNHME_MULTICAST -/* Real fast bit-reversal algorithm, 6-bit values */ -static int reverse6[64] = { - 0x0,0x20,0x10,0x30,0x8,0x28,0x18,0x38, - 0x4,0x24,0x14,0x34,0xc,0x2c,0x1c,0x3c, - 0x2,0x22,0x12,0x32,0xa,0x2a,0x1a,0x3a, - 0x6,0x26,0x16,0x36,0xe,0x2e,0x1e,0x3e, - 0x1,0x21,0x11,0x31,0x9,0x29,0x19,0x39, - 0x5,0x25,0x15,0x35,0xd,0x2d,0x1d,0x3d, - 0x3,0x23,0x13,0x33,0xb,0x2b,0x1b,0x3b, - 0x7,0x27,0x17,0x37,0xf,0x2f,0x1f,0x3f -}; - -static unsigned int -crc416(unsigned int curval, unsigned short nxtval) -{ - unsigned int counter, cur = curval, next = nxtval; - int high_crc_set, low_data_set; - - /* Swap bytes */ - next = ((next & 0x00FF) << 8) | (next >> 8); - - /* Compute bit-by-bit */ - for (counter = 0; counter < 16; ++counter) { - /* is high CRC bit set? */ - if ((cur & 0x80000000) == 0) high_crc_set = 0; - else high_crc_set = 1; - - cur = cur << 1; - - if ((next & 0x0001) == 0) low_data_set = 0; - else low_data_set = 1; - - next = next >> 1; - - /* do the XOR */ - if (high_crc_set ^ low_data_set) cur = cur ^ CRC32_POLY_BE; - } - return cur; -} - -static unsigned int -bmac_crc(unsigned short *address) -{ - unsigned int newcrc; - - XXDEBUG(("bmac_crc: addr=%#04x, %#04x, %#04x\n", *address, address[1], address[2])); - newcrc = crc416(0xffffffff, *address); /* address bits 47 - 32 */ - newcrc = crc416(newcrc, address[1]); /* address bits 31 - 16 */ - newcrc = crc416(newcrc, address[2]); /* address bits 15 - 0 */ - - return(newcrc); -} - /* * Add requested mcast addr to BMac's hash table filter. * @@ -861,8 +807,7 @@ bmac_addhash(struct bmac_data *bp, unsigned char *addr) unsigned short mask; if (!(*addr)) return; - crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */ - crc = reverse6[crc]; /* Hyperfast bit-reversing algorithm */ + crc = crc32(~0, addr, ETH_ALEN) >> 26; if (bp->hash_use_count[crc]++) return; /* This bit is already set */ mask = crc % 16; mask = (unsigned char)1 << mask; @@ -876,8 +821,7 @@ bmac_removehash(struct bmac_data *bp, unsigned char *addr) unsigned char mask; /* Now, delete the address from the filter copy, as indicated */ - crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! 
*/ - crc = reverse6[crc]; /* Hyperfast bit-reversing algorithm */ + crc = crc32(~0, addr, ETH_ALEN) >> 26; if (bp->hash_use_count[crc] == 0) return; /* That bit wasn't in use! */ if (--bp->hash_use_count[crc]) return; /* That bit is still in use */ mask = crc % 16; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c index c1d1673c5749..b565189e5913 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c @@ -123,7 +123,6 @@ static netdev_tx_t aq_ndev_start_xmit(struct sk_buff *skb, struct net_device *nd } #endif - skb_tx_timestamp(skb); return aq_nic_xmit(aq_nic, skb); } diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index bf3aa46887a1..e71cd10e4e1f 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -898,6 +898,8 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb) frags = aq_nic_map_skb(self, skb, ring); + skb_tx_timestamp(skb); + if (likely(frags)) { err = self->aq_hw_ops->hw_ring_tx_xmit(self->aq_hw, ring, frags); diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index 1bd4313215d7..636520bb4b8c 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -123,6 +123,7 @@ config TIGON3 tristate "Broadcom Tigon3 support" depends on PCI depends on PTP_1588_CLOCK_OPTIONAL + select CRC32 select PHYLIB help This driver supports Broadcom Tigon3 based gigabit Ethernet cards. diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.c b/drivers/net/ethernet/broadcom/asp2/bcmasp.c index a68fab1b05f0..fd35f4b4dc50 100644 --- a/drivers/net/ethernet/broadcom/asp2/bcmasp.c +++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.c @@ -141,7 +141,7 @@ void bcmasp_flush_rx_port(struct bcmasp_intf *intf) return; } - rx_ctrl_core_wl(priv, mask, priv->hw_info->rx_ctrl_flush); + rx_ctrl_core_wl(priv, mask, ASP_RX_CTRL_FLUSH); } static void bcmasp_netfilt_hw_en_wake(struct bcmasp_priv *priv, @@ -518,7 +518,7 @@ void bcmasp_netfilt_suspend(struct bcmasp_intf *intf) int ret, i; /* Write all filters to HW */ - for (i = 0; i < NUM_NET_FILTERS; i++) { + for (i = 0; i < priv->num_net_filters; i++) { /* If the filter does not match the port, skip programming. 
*/ if (!priv->net_filters[i].claimed || priv->net_filters[i].port != intf->port) @@ -551,7 +551,7 @@ int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs, struct bcmasp_priv *priv = intf->parent; int j = 0, i; - for (i = 0; i < NUM_NET_FILTERS; i++) { + for (i = 0; i < priv->num_net_filters; i++) { if (!priv->net_filters[i].claimed || priv->net_filters[i].port != intf->port) continue; @@ -577,7 +577,7 @@ int bcmasp_netfilt_get_active(struct bcmasp_intf *intf) struct bcmasp_priv *priv = intf->parent; int cnt = 0, i; - for (i = 0; i < NUM_NET_FILTERS; i++) { + for (i = 0; i < priv->num_net_filters; i++) { if (!priv->net_filters[i].claimed || priv->net_filters[i].port != intf->port) continue; @@ -602,7 +602,7 @@ bool bcmasp_netfilt_check_dup(struct bcmasp_intf *intf, size_t fs_size = 0; int i; - for (i = 0; i < NUM_NET_FILTERS; i++) { + for (i = 0; i < priv->num_net_filters; i++) { if (!priv->net_filters[i].claimed || priv->net_filters[i].port != intf->port) continue; @@ -670,7 +670,7 @@ struct bcmasp_net_filter *bcmasp_netfilt_get_init(struct bcmasp_intf *intf, int i, open_index = -1; /* Check whether we exceed the filter table capacity */ - if (loc != RX_CLS_LOC_ANY && loc >= NUM_NET_FILTERS) + if (loc != RX_CLS_LOC_ANY && loc >= priv->num_net_filters) return ERR_PTR(-EINVAL); /* If the filter location is busy (already claimed) and we are initializing @@ -686,7 +686,7 @@ struct bcmasp_net_filter *bcmasp_netfilt_get_init(struct bcmasp_intf *intf, /* Initialize the loop index based on the desired location or from 0 */ i = loc == RX_CLS_LOC_ANY ? 0 : loc; - for ( ; i < NUM_NET_FILTERS; i++) { + for ( ; i < priv->num_net_filters; i++) { /* Found matching network filter */ if (!init && priv->net_filters[i].claimed && @@ -779,7 +779,7 @@ static void bcmasp_en_mda_filter(struct bcmasp_intf *intf, bool en, priv->mda_filters[i].en = en; priv->mda_filters[i].port = intf->port; - rx_filter_core_wl(priv, ((intf->channel + 8) | + rx_filter_core_wl(priv, ((intf->channel + priv->tx_chan_offset) | (en << ASP_RX_FILTER_MDA_CFG_EN_SHIFT) | ASP_RX_FILTER_MDA_CFG_UMC_SEL(intf->port)), ASP_RX_FILTER_MDA_CFG(i)); @@ -865,7 +865,7 @@ void bcmasp_disable_all_filters(struct bcmasp_intf *intf) res_count = bcmasp_total_res_mda_cnt(intf->parent); /* Disable all filters held by this port */ - for (i = res_count; i < NUM_MDA_FILTERS; i++) { + for (i = res_count; i < priv->num_mda_filters; i++) { if (priv->mda_filters[i].en && priv->mda_filters[i].port == intf->port) bcmasp_en_mda_filter(intf, 0, i); @@ -909,7 +909,7 @@ int bcmasp_set_en_mda_filter(struct bcmasp_intf *intf, unsigned char *addr, res_count = bcmasp_total_res_mda_cnt(intf->parent); - for (i = res_count; i < NUM_MDA_FILTERS; i++) { + for (i = res_count; i < priv->num_mda_filters; i++) { /* If filter not enabled or belongs to another port skip */ if (!priv->mda_filters[i].en || priv->mda_filters[i].port != intf->port) @@ -924,7 +924,7 @@ int bcmasp_set_en_mda_filter(struct bcmasp_intf *intf, unsigned char *addr, } /* Create new filter if possible */ - for (i = res_count; i < NUM_MDA_FILTERS; i++) { + for (i = res_count; i < priv->num_mda_filters; i++) { if (priv->mda_filters[i].en) continue; @@ -944,12 +944,12 @@ static void bcmasp_core_init_filters(struct bcmasp_priv *priv) /* Disable all filters and reset software view since the HW * can lose context while in deep sleep suspend states */ - for (i = 0; i < NUM_MDA_FILTERS; i++) { + for (i = 0; i < priv->num_mda_filters; i++) { rx_filter_core_wl(priv, 0x0, ASP_RX_FILTER_MDA_CFG(i)); 
priv->mda_filters[i].en = 0; } - for (i = 0; i < NUM_NET_FILTERS; i++) + for (i = 0; i < priv->num_net_filters; i++) rx_filter_core_wl(priv, 0x0, ASP_RX_FILTER_NET_CFG(i)); /* Top level filter enable bit should be enabled at all times, set @@ -966,18 +966,8 @@ static void bcmasp_core_init_filters(struct bcmasp_priv *priv) /* ASP core initialization */ static void bcmasp_core_init(struct bcmasp_priv *priv) { - tx_analytics_core_wl(priv, 0x0, ASP_TX_ANALYTICS_CTRL); - rx_analytics_core_wl(priv, 0x4, ASP_RX_ANALYTICS_CTRL); - - rx_edpkt_core_wl(priv, (ASP_EDPKT_HDR_SZ_128 << ASP_EDPKT_HDR_SZ_SHIFT), - ASP_EDPKT_HDR_CFG); - rx_edpkt_core_wl(priv, - (ASP_EDPKT_ENDI_BT_SWP_WD << ASP_EDPKT_ENDI_DESC_SHIFT), - ASP_EDPKT_ENDI); - rx_edpkt_core_wl(priv, 0x1b, ASP_EDPKT_BURST_BUF_PSCAL_TOUT); rx_edpkt_core_wl(priv, 0x3e8, ASP_EDPKT_BURST_BUF_WRITE_TOUT); - rx_edpkt_core_wl(priv, 0x3e8, ASP_EDPKT_BURST_BUF_READ_TOUT); rx_edpkt_core_wl(priv, ASP_EDPKT_ENABLE_EN, ASP_EDPKT_ENABLE); @@ -1020,6 +1010,18 @@ static void bcmasp_core_clock_select_one(struct bcmasp_priv *priv, bool slow) ctrl_core_wl(priv, reg, ASP_CTRL_CORE_CLOCK_SELECT); } +static void bcmasp_core_clock_select_one_ctrl2(struct bcmasp_priv *priv, bool slow) +{ + u32 reg; + + reg = ctrl2_core_rl(priv, ASP_CTRL2_CORE_CLOCK_SELECT); + if (slow) + reg &= ~ASP_CTRL2_CORE_CLOCK_SELECT_MAIN; + else + reg |= ASP_CTRL2_CORE_CLOCK_SELECT_MAIN; + ctrl2_core_wl(priv, reg, ASP_CTRL2_CORE_CLOCK_SELECT); +} + static void bcmasp_core_clock_set_ll(struct bcmasp_priv *priv, u32 clr, u32 set) { u32 reg; @@ -1108,7 +1110,7 @@ static int bcmasp_get_and_request_irq(struct bcmasp_priv *priv, int i) return irq; } -static void bcmasp_init_wol_shared(struct bcmasp_priv *priv) +static void bcmasp_init_wol(struct bcmasp_priv *priv) { struct platform_device *pdev = priv->pdev; struct device *dev = &pdev->dev; @@ -1125,7 +1127,7 @@ static void bcmasp_init_wol_shared(struct bcmasp_priv *priv) device_set_wakeup_capable(&pdev->dev, 1); } -static void bcmasp_enable_wol_shared(struct bcmasp_intf *intf, bool en) +void bcmasp_enable_wol(struct bcmasp_intf *intf, bool en) { struct bcmasp_priv *priv = intf->parent; struct device *dev = &priv->pdev->dev; @@ -1154,54 +1156,12 @@ static void bcmasp_enable_wol_shared(struct bcmasp_intf *intf, bool en) } } -static void bcmasp_wol_irq_destroy_shared(struct bcmasp_priv *priv) +static void bcmasp_wol_irq_destroy(struct bcmasp_priv *priv) { if (priv->wol_irq > 0) free_irq(priv->wol_irq, priv); } -static void bcmasp_init_wol_per_intf(struct bcmasp_priv *priv) -{ - struct platform_device *pdev = priv->pdev; - struct device *dev = &pdev->dev; - struct bcmasp_intf *intf; - int irq; - - list_for_each_entry(intf, &priv->intfs, list) { - irq = bcmasp_get_and_request_irq(priv, intf->port + 1); - if (irq < 0) { - dev_warn(dev, "Failed to init WoL irq(port %d): %d\n", - intf->port, irq); - continue; - } - - intf->wol_irq = irq; - intf->wol_irq_enabled = false; - device_set_wakeup_capable(&pdev->dev, 1); - } -} - -static void bcmasp_enable_wol_per_intf(struct bcmasp_intf *intf, bool en) -{ - struct device *dev = &intf->parent->pdev->dev; - - if (en ^ intf->wol_irq_enabled) - irq_set_irq_wake(intf->wol_irq, en); - - intf->wol_irq_enabled = en; - device_set_wakeup_enable(dev, en); -} - -static void bcmasp_wol_irq_destroy_per_intf(struct bcmasp_priv *priv) -{ - struct bcmasp_intf *intf; - - list_for_each_entry(intf, &priv->intfs, list) { - if (intf->wol_irq > 0) - free_irq(intf->wol_irq, priv); - } -} - static void bcmasp_eee_fixup(struct bcmasp_intf 
*intf, bool en) { u32 reg, phy_lpi_overwrite; @@ -1220,70 +1180,53 @@ static void bcmasp_eee_fixup(struct bcmasp_intf *intf, bool en) usleep_range(50, 100); } -static struct bcmasp_hw_info v20_hw_info = { - .rx_ctrl_flush = ASP_RX_CTRL_FLUSH, - .umac2fb = UMAC2FB_OFFSET, - .rx_ctrl_fb_out_frame_count = ASP_RX_CTRL_FB_OUT_FRAME_COUNT, - .rx_ctrl_fb_filt_out_frame_count = ASP_RX_CTRL_FB_FILT_OUT_FRAME_COUNT, - .rx_ctrl_fb_rx_fifo_depth = ASP_RX_CTRL_FB_RX_FIFO_DEPTH, -}; - -static const struct bcmasp_plat_data v20_plat_data = { - .init_wol = bcmasp_init_wol_per_intf, - .enable_wol = bcmasp_enable_wol_per_intf, - .destroy_wol = bcmasp_wol_irq_destroy_per_intf, - .core_clock_select = bcmasp_core_clock_select_one, - .hw_info = &v20_hw_info, -}; - -static struct bcmasp_hw_info v21_hw_info = { - .rx_ctrl_flush = ASP_RX_CTRL_FLUSH_2_1, - .umac2fb = UMAC2FB_OFFSET_2_1, - .rx_ctrl_fb_out_frame_count = ASP_RX_CTRL_FB_OUT_FRAME_COUNT_2_1, - .rx_ctrl_fb_filt_out_frame_count = - ASP_RX_CTRL_FB_FILT_OUT_FRAME_COUNT_2_1, - .rx_ctrl_fb_rx_fifo_depth = ASP_RX_CTRL_FB_RX_FIFO_DEPTH_2_1, -}; - static const struct bcmasp_plat_data v21_plat_data = { - .init_wol = bcmasp_init_wol_shared, - .enable_wol = bcmasp_enable_wol_shared, - .destroy_wol = bcmasp_wol_irq_destroy_shared, .core_clock_select = bcmasp_core_clock_select_one, - .hw_info = &v21_hw_info, + .num_mda_filters = 32, + .num_net_filters = 32, + .tx_chan_offset = 8, + .rx_ctrl_offset = 0x0, }; static const struct bcmasp_plat_data v22_plat_data = { - .init_wol = bcmasp_init_wol_shared, - .enable_wol = bcmasp_enable_wol_shared, - .destroy_wol = bcmasp_wol_irq_destroy_shared, .core_clock_select = bcmasp_core_clock_select_many, - .hw_info = &v21_hw_info, .eee_fixup = bcmasp_eee_fixup, + .num_mda_filters = 32, + .num_net_filters = 32, + .tx_chan_offset = 8, + .rx_ctrl_offset = 0x0, +}; + +static const struct bcmasp_plat_data v30_plat_data = { + .core_clock_select = bcmasp_core_clock_select_one_ctrl2, + .num_mda_filters = 20, + .num_net_filters = 16, + .tx_chan_offset = 0, + .rx_ctrl_offset = 0x10000, }; static void bcmasp_set_pdata(struct bcmasp_priv *priv, const struct bcmasp_plat_data *pdata) { - priv->init_wol = pdata->init_wol; - priv->enable_wol = pdata->enable_wol; - priv->destroy_wol = pdata->destroy_wol; priv->core_clock_select = pdata->core_clock_select; priv->eee_fixup = pdata->eee_fixup; - priv->hw_info = pdata->hw_info; + priv->num_mda_filters = pdata->num_mda_filters; + priv->num_net_filters = pdata->num_net_filters; + priv->tx_chan_offset = pdata->tx_chan_offset; + priv->rx_ctrl_offset = pdata->rx_ctrl_offset; } static const struct of_device_id bcmasp_of_match[] = { - { .compatible = "brcm,asp-v2.0", .data = &v20_plat_data }, { .compatible = "brcm,asp-v2.1", .data = &v21_plat_data }, { .compatible = "brcm,asp-v2.2", .data = &v22_plat_data }, + { .compatible = "brcm,asp-v3.0", .data = &v30_plat_data }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, bcmasp_of_match); static const struct of_device_id bcmasp_mdio_of_match[] = { - { .compatible = "brcm,asp-v2.2-mdio", }, { .compatible = "brcm,asp-v2.1-mdio", }, - { .compatible = "brcm,asp-v2.0-mdio", }, + { .compatible = "brcm,asp-v2.2-mdio", }, + { .compatible = "brcm,asp-v3.0-mdio", }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, bcmasp_mdio_of_match); @@ -1365,6 +1308,17 @@ static int bcmasp_probe(struct platform_device *pdev) * how many interfaces come up. 
*/ bcmasp_core_init(priv); + + priv->mda_filters = devm_kcalloc(dev, priv->num_mda_filters, + sizeof(*priv->mda_filters), GFP_KERNEL); + if (!priv->mda_filters) + return -ENOMEM; + + priv->net_filters = devm_kcalloc(dev, priv->num_net_filters, + sizeof(*priv->net_filters), GFP_KERNEL); + if (!priv->net_filters) + return -ENOMEM; + bcmasp_core_init_filters(priv); ports_node = of_find_node_by_name(dev->of_node, "ethernet-ports"); @@ -1387,7 +1341,7 @@ static int bcmasp_probe(struct platform_device *pdev) } /* Check and enable WoL */ - priv->init_wol(priv); + bcmasp_init_wol(priv); /* Drop the clock reference count now and let ndo_open()/ndo_close() * manage it for us from now on. @@ -1404,7 +1358,7 @@ static int bcmasp_probe(struct platform_device *pdev) if (ret) { netdev_err(intf->ndev, "failed to register net_device: %d\n", ret); - priv->destroy_wol(priv); + bcmasp_wol_irq_destroy(priv); bcmasp_remove_intfs(priv); goto of_put_exit; } @@ -1425,7 +1379,7 @@ static void bcmasp_remove(struct platform_device *pdev) if (!priv) return; - priv->destroy_wol(priv); + bcmasp_wol_irq_destroy(priv); bcmasp_remove_intfs(priv); } diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.h b/drivers/net/ethernet/broadcom/asp2/bcmasp.h index 8fc75bcedb70..74adfdb50e11 100644 --- a/drivers/net/ethernet/broadcom/asp2/bcmasp.h +++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.h @@ -53,22 +53,15 @@ #define ASP_RX_CTRL_FB_0_FRAME_COUNT 0x14 #define ASP_RX_CTRL_FB_1_FRAME_COUNT 0x18 #define ASP_RX_CTRL_FB_8_FRAME_COUNT 0x1c -/* asp2.1 diverges offsets here */ -/* ASP2.0 */ -#define ASP_RX_CTRL_FB_OUT_FRAME_COUNT 0x20 -#define ASP_RX_CTRL_FB_FILT_OUT_FRAME_COUNT 0x24 -#define ASP_RX_CTRL_FLUSH 0x28 -#define ASP_CTRL_UMAC0_FLUSH_MASK (BIT(0) | BIT(12)) -#define ASP_CTRL_UMAC1_FLUSH_MASK (BIT(1) | BIT(13)) -#define ASP_CTRL_SPB_FLUSH_MASK (BIT(8) | BIT(20)) -#define ASP_RX_CTRL_FB_RX_FIFO_DEPTH 0x30 -/* ASP2.1 */ -#define ASP_RX_CTRL_FB_9_FRAME_COUNT_2_1 0x20 -#define ASP_RX_CTRL_FB_10_FRAME_COUNT_2_1 0x24 -#define ASP_RX_CTRL_FB_OUT_FRAME_COUNT_2_1 0x28 -#define ASP_RX_CTRL_FB_FILT_OUT_FRAME_COUNT_2_1 0x2c -#define ASP_RX_CTRL_FLUSH_2_1 0x30 -#define ASP_RX_CTRL_FB_RX_FIFO_DEPTH_2_1 0x38 +#define ASP_RX_CTRL_FB_9_FRAME_COUNT 0x20 +#define ASP_RX_CTRL_FB_10_FRAME_COUNT 0x24 +#define ASP_RX_CTRL_FB_OUT_FRAME_COUNT 0x28 +#define ASP_RX_CTRL_FB_FILT_OUT_FRAME_COUNT 0x2c +#define ASP_RX_CTRL_FLUSH 0x30 +#define ASP_CTRL_UMAC0_FLUSH_MASK (BIT(0) | BIT(12)) +#define ASP_CTRL_UMAC1_FLUSH_MASK (BIT(1) | BIT(13)) +#define ASP_CTRL_SPB_FLUSH_MASK (BIT(8) | BIT(20)) +#define ASP_RX_CTRL_FB_RX_FIFO_DEPTH 0x38 #define ASP_RX_FILTER_OFFSET 0x80000 #define ASP_RX_FILTER_BLK_CTRL 0x0 @@ -345,9 +338,6 @@ struct bcmasp_intf { u32 wolopts; u8 sopass[SOPASS_MAX]; - /* Used if per intf wol irq */ - int wol_irq; - unsigned int wol_irq_enabled:1; }; #define NUM_NET_FILTERS 32 @@ -370,21 +360,13 @@ struct bcmasp_mda_filter { u8 mask[ETH_ALEN]; }; -struct bcmasp_hw_info { - u32 rx_ctrl_flush; - u32 umac2fb; - u32 rx_ctrl_fb_out_frame_count; - u32 rx_ctrl_fb_filt_out_frame_count; - u32 rx_ctrl_fb_rx_fifo_depth; -}; - struct bcmasp_plat_data { - void (*init_wol)(struct bcmasp_priv *priv); - void (*enable_wol)(struct bcmasp_intf *intf, bool en); - void (*destroy_wol)(struct bcmasp_priv *priv); void (*core_clock_select)(struct bcmasp_priv *priv, bool slow); void (*eee_fixup)(struct bcmasp_intf *priv, bool en); - struct bcmasp_hw_info *hw_info; + unsigned int num_mda_filters; + unsigned int num_net_filters; + unsigned int tx_chan_offset; + unsigned 
int rx_ctrl_offset; }; struct bcmasp_priv { @@ -399,18 +381,18 @@ struct bcmasp_priv { int wol_irq; unsigned long wol_irq_enabled_mask; - void (*init_wol)(struct bcmasp_priv *priv); - void (*enable_wol)(struct bcmasp_intf *intf, bool en); - void (*destroy_wol)(struct bcmasp_priv *priv); void (*core_clock_select)(struct bcmasp_priv *priv, bool slow); void (*eee_fixup)(struct bcmasp_intf *intf, bool en); + unsigned int num_mda_filters; + unsigned int num_net_filters; + unsigned int tx_chan_offset; + unsigned int rx_ctrl_offset; void __iomem *base; - struct bcmasp_hw_info *hw_info; struct list_head intfs; - struct bcmasp_mda_filter mda_filters[NUM_MDA_FILTERS]; + struct bcmasp_mda_filter *mda_filters; /* MAC destination address filters lock */ spinlock_t mda_lock; @@ -418,7 +400,7 @@ struct bcmasp_priv { /* Protects accesses to ASP_CTRL_CLOCK_CTRL */ spinlock_t clk_lock; - struct bcmasp_net_filter net_filters[NUM_NET_FILTERS]; + struct bcmasp_net_filter *net_filters; /* Network filter lock */ struct mutex net_lock; @@ -508,8 +490,8 @@ BCMASP_FP_IO_MACRO_Q(rx_edpkt_cfg); #define PKT_OFFLOAD_EPKT_IP(x) ((x) << 21) #define PKT_OFFLOAD_EPKT_TP(x) ((x) << 19) #define PKT_OFFLOAD_EPKT_LEN(x) ((x) << 16) -#define PKT_OFFLOAD_EPKT_CSUM_L3 BIT(15) -#define PKT_OFFLOAD_EPKT_CSUM_L2 BIT(14) +#define PKT_OFFLOAD_EPKT_CSUM_L4 BIT(15) +#define PKT_OFFLOAD_EPKT_CSUM_L3 BIT(14) #define PKT_OFFLOAD_EPKT_ID(x) ((x) << 12) #define PKT_OFFLOAD_EPKT_SEQ(x) ((x) << 10) #define PKT_OFFLOAD_EPKT_TS(x) ((x) << 8) @@ -541,12 +523,27 @@ BCMASP_CORE_IO_MACRO(intr2, ASP_INTR2_OFFSET); BCMASP_CORE_IO_MACRO(wakeup_intr2, ASP_WAKEUP_INTR2_OFFSET); BCMASP_CORE_IO_MACRO(tx_analytics, ASP_TX_ANALYTICS_OFFSET); BCMASP_CORE_IO_MACRO(rx_analytics, ASP_RX_ANALYTICS_OFFSET); -BCMASP_CORE_IO_MACRO(rx_ctrl, ASP_RX_CTRL_OFFSET); BCMASP_CORE_IO_MACRO(rx_filter, ASP_RX_FILTER_OFFSET); BCMASP_CORE_IO_MACRO(rx_edpkt, ASP_EDPKT_OFFSET); BCMASP_CORE_IO_MACRO(ctrl, ASP_CTRL_OFFSET); BCMASP_CORE_IO_MACRO(ctrl2, ASP_CTRL2_OFFSET); +#define BCMASP_CORE_IO_MACRO_OFFSET(name, offset) \ +static inline u32 name##_core_rl(struct bcmasp_priv *priv, \ + u32 off) \ +{ \ + u32 reg = readl_relaxed(priv->base + priv->name##_offset + \ + (offset) + off); \ + return reg; \ +} \ +static inline void name##_core_wl(struct bcmasp_priv *priv, \ + u32 val, u32 off) \ +{ \ + writel_relaxed(val, priv->base + priv->name##_offset + \ + (offset) + off); \ +} +BCMASP_CORE_IO_MACRO_OFFSET(rx_ctrl, ASP_RX_CTRL_OFFSET); + struct bcmasp_intf *bcmasp_interface_create(struct bcmasp_priv *priv, struct device_node *ndev_dn, int i); @@ -599,4 +596,5 @@ int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs, void bcmasp_netfilt_suspend(struct bcmasp_intf *intf); +void bcmasp_enable_wol(struct bcmasp_intf *intf, bool en); #endif diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c index a537c121d3e2..4381a4cfd8c6 100644 --- a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c +++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c @@ -10,7 +10,6 @@ #include "bcmasp_intf_defs.h" enum bcmasp_stat_type { - BCMASP_STAT_RX_EDPKT, BCMASP_STAT_RX_CTRL, BCMASP_STAT_RX_CTRL_PER_INTF, BCMASP_STAT_SOFT, @@ -33,8 +32,6 @@ struct bcmasp_stats { .reg_offset = offset, \ } -#define STAT_BCMASP_RX_EDPKT(str, offset) \ - STAT_BCMASP_OFFSET(str, BCMASP_STAT_RX_EDPKT, offset) #define STAT_BCMASP_RX_CTRL(str, offset) \ STAT_BCMASP_OFFSET(str, BCMASP_STAT_RX_CTRL, offset) #define STAT_BCMASP_RX_CTRL_PER_INTF(str, 
offset) \ @@ -42,11 +39,6 @@ struct bcmasp_stats { /* Must match the order of struct bcmasp_mib_counters */ static const struct bcmasp_stats bcmasp_gstrings_stats[] = { - /* EDPKT counters */ - STAT_BCMASP_RX_EDPKT("RX Time Stamp", ASP_EDPKT_RX_TS_COUNTER), - STAT_BCMASP_RX_EDPKT("RX PKT Count", ASP_EDPKT_RX_PKT_CNT), - STAT_BCMASP_RX_EDPKT("RX PKT Buffered", ASP_EDPKT_HDR_EXTR_CNT), - STAT_BCMASP_RX_EDPKT("RX PKT Pushed to DRAM", ASP_EDPKT_HDR_OUT_CNT), /* ASP RX control */ STAT_BCMASP_RX_CTRL_PER_INTF("Frames From Unimac", ASP_RX_CTRL_UMAC_0_FRAME_COUNT), @@ -71,23 +63,6 @@ static const struct bcmasp_stats bcmasp_gstrings_stats[] = { #define BCMASP_STATS_LEN ARRAY_SIZE(bcmasp_gstrings_stats) -static u16 bcmasp_stat_fixup_offset(struct bcmasp_intf *intf, - const struct bcmasp_stats *s) -{ - struct bcmasp_priv *priv = intf->parent; - - if (!strcmp("Frames Out(Buffer)", s->stat_string)) - return priv->hw_info->rx_ctrl_fb_out_frame_count; - - if (!strcmp("Frames Out(Filters)", s->stat_string)) - return priv->hw_info->rx_ctrl_fb_filt_out_frame_count; - - if (!strcmp("RX Buffer FIFO Depth", s->stat_string)) - return priv->hw_info->rx_ctrl_fb_rx_fifo_depth; - - return s->reg_offset; -} - static int bcmasp_get_sset_count(struct net_device *dev, int string_set) { switch (string_set) { @@ -126,13 +101,10 @@ static void bcmasp_update_mib_counters(struct bcmasp_intf *intf) char *p; s = &bcmasp_gstrings_stats[i]; - offset = bcmasp_stat_fixup_offset(intf, s); + offset = s->reg_offset; switch (s->type) { case BCMASP_STAT_SOFT: continue; - case BCMASP_STAT_RX_EDPKT: - val = rx_edpkt_core_rl(intf->parent, offset); - break; case BCMASP_STAT_RX_CTRL: val = rx_ctrl_core_rl(intf->parent, offset); break; @@ -215,7 +187,7 @@ static int bcmasp_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) memcpy(intf->sopass, wol->sopass, sizeof(wol->sopass)); mutex_lock(&priv->wol_lock); - priv->enable_wol(intf, !!intf->wolopts); + bcmasp_enable_wol(intf, !!intf->wolopts); mutex_unlock(&priv->wol_lock); return 0; @@ -289,7 +261,7 @@ static int bcmasp_flow_get(struct bcmasp_intf *intf, struct ethtool_rxnfc *cmd) memcpy(&cmd->fs, &nfilter->fs, sizeof(nfilter->fs)); - cmd->data = NUM_NET_FILTERS; + cmd->data = intf->parent->num_net_filters; return 0; } @@ -336,7 +308,7 @@ static int bcmasp_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, break; case ETHTOOL_GRXCLSRLALL: err = bcmasp_netfilt_get_all_active(intf, rule_locs, &cmd->rule_cnt); - cmd->data = NUM_NET_FILTERS; + cmd->data = intf->parent->num_net_filters; break; default: err = -EOPNOTSUPP; diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c index 45ec1a9214a2..0d61b8580d72 100644 --- a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c +++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c @@ -180,14 +180,14 @@ static struct sk_buff *bcmasp_csum_offload(struct net_device *dev, case htons(ETH_P_IP): header |= PKT_OFFLOAD_HDR_SIZE_2((ip_hdrlen(skb) >> 8) & 0xf); header2 |= PKT_OFFLOAD_HDR2_SIZE_2(ip_hdrlen(skb) & 0xff); - epkt |= PKT_OFFLOAD_EPKT_IP(0) | PKT_OFFLOAD_EPKT_CSUM_L2; + epkt |= PKT_OFFLOAD_EPKT_IP(0); ip_proto = ip_hdr(skb)->protocol; header_cnt += 2; break; case htons(ETH_P_IPV6): header |= PKT_OFFLOAD_HDR_SIZE_2((IP6_HLEN >> 8) & 0xf); header2 |= PKT_OFFLOAD_HDR2_SIZE_2(IP6_HLEN & 0xff); - epkt |= PKT_OFFLOAD_EPKT_IP(1) | PKT_OFFLOAD_EPKT_CSUM_L2; + epkt |= PKT_OFFLOAD_EPKT_IP(1); ip_proto = ipv6_hdr(skb)->nexthdr; header_cnt += 2; break; @@ -198,12 +198,12 @@ static struct 
sk_buff *bcmasp_csum_offload(struct net_device *dev, switch (ip_proto) { case IPPROTO_TCP: header2 |= PKT_OFFLOAD_HDR2_SIZE_3(tcp_hdrlen(skb)); - epkt |= PKT_OFFLOAD_EPKT_TP(0) | PKT_OFFLOAD_EPKT_CSUM_L3; + epkt |= PKT_OFFLOAD_EPKT_TP(0) | PKT_OFFLOAD_EPKT_CSUM_L4; header_cnt++; break; case IPPROTO_UDP: header2 |= PKT_OFFLOAD_HDR2_SIZE_3(UDP_HLEN); - epkt |= PKT_OFFLOAD_EPKT_TP(1) | PKT_OFFLOAD_EPKT_CSUM_L3; + epkt |= PKT_OFFLOAD_EPKT_TP(1) | PKT_OFFLOAD_EPKT_CSUM_L4; header_cnt++; break; default: @@ -818,9 +818,7 @@ static void bcmasp_init_tx(struct bcmasp_intf *intf) /* Tx SPB */ tx_spb_ctrl_wl(intf, ((intf->channel + 8) << TX_SPB_CTRL_XF_BID_SHIFT), TX_SPB_CTRL_XF_CTRL2); - tx_pause_ctrl_wl(intf, (1 << (intf->channel + 8)), TX_PAUSE_MAP_VECTOR); tx_spb_top_wl(intf, 0x1e, TX_SPB_TOP_BLKOUT); - tx_spb_top_wl(intf, 0x0, TX_SPB_TOP_SPRE_BW_CTRL); tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_READ); tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_BASE); @@ -1185,7 +1183,7 @@ static void bcmasp_map_res(struct bcmasp_priv *priv, struct bcmasp_intf *intf) { /* Per port */ intf->res.umac = priv->base + UMC_OFFSET(intf); - intf->res.umac2fb = priv->base + (priv->hw_info->umac2fb + + intf->res.umac2fb = priv->base + (UMAC2FB_OFFSET + priv->rx_ctrl_offset + (intf->port * 0x4)); intf->res.rgmii = priv->base + RGMII_OFFSET(intf); @@ -1200,7 +1198,6 @@ static void bcmasp_map_res(struct bcmasp_priv *priv, struct bcmasp_intf *intf) intf->rx_edpkt_cfg = priv->base + RX_EDPKT_CFG_OFFSET(intf); } -#define MAX_IRQ_STR_LEN 64 struct bcmasp_intf *bcmasp_interface_create(struct bcmasp_priv *priv, struct device_node *ndev_dn, int i) { diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf_defs.h b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf_defs.h index ad742612895f..af7418348e81 100644 --- a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf_defs.h +++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf_defs.h @@ -118,8 +118,7 @@ #define UMC_PSW_MS 0x624 #define UMC_PSW_LS 0x628 -#define UMAC2FB_OFFSET_2_1 0x9f044 -#define UMAC2FB_OFFSET 0x9f03c +#define UMAC2FB_OFFSET 0x9f044 #define UMAC2FB_CFG 0x0 #define UMAC2FB_CFG_OPUT_EN BIT(0) #define UMAC2FB_CFG_VLAN_EN BIT(1) diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index a461ec612e95..3e9c57196a39 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -1446,7 +1446,7 @@ int bgmac_phy_connect_direct(struct bgmac *bgmac) struct phy_device *phy_dev; int err; - phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, NULL); + phy_dev = fixed_phy_register(&fphy_status, NULL); if (IS_ERR(phy_dev)) { dev_err(bgmac->dev, "Failed to register fixed PHY device\n"); return PTR_ERR(phy_dev); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 6afc2ab6fad2..d5495762c945 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -893,9 +893,9 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) bnapi->events &= ~BNXT_TX_CMP_EVENT; } -static bool bnxt_separate_head_pool(void) +static bool bnxt_separate_head_pool(struct bnxt_rx_ring_info *rxr) { - return PAGE_SIZE > BNXT_RX_PAGE_SIZE; + return rxr->need_head_pool || PAGE_SIZE > BNXT_RX_PAGE_SIZE; } static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping, @@ -919,6 +919,20 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping, return page; } +static 
netmem_ref __bnxt_alloc_rx_netmem(struct bnxt *bp, dma_addr_t *mapping, + struct bnxt_rx_ring_info *rxr, + gfp_t gfp) +{ + netmem_ref netmem; + + netmem = page_pool_alloc_netmems(rxr->page_pool, gfp); + if (!netmem) + return 0; + + *mapping = page_pool_get_dma_addr_netmem(netmem); + return netmem; +} + static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping, struct bnxt_rx_ring_info *rxr, gfp_t gfp) @@ -999,21 +1013,19 @@ static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx) return next; } -static inline int bnxt_alloc_rx_page(struct bnxt *bp, - struct bnxt_rx_ring_info *rxr, - u16 prod, gfp_t gfp) +static int bnxt_alloc_rx_netmem(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, + u16 prod, gfp_t gfp) { struct rx_bd *rxbd = &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)]; struct bnxt_sw_rx_agg_bd *rx_agg_buf; - struct page *page; - dma_addr_t mapping; u16 sw_prod = rxr->rx_sw_agg_prod; unsigned int offset = 0; + dma_addr_t mapping; + netmem_ref netmem; - page = __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp); - - if (!page) + netmem = __bnxt_alloc_rx_netmem(bp, &mapping, rxr, gfp); + if (!netmem) return -ENOMEM; if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap))) @@ -1023,7 +1035,7 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp, rx_agg_buf = &rxr->rx_agg_ring[sw_prod]; rxr->rx_sw_agg_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod)); - rx_agg_buf->page = page; + rx_agg_buf->netmem = netmem; rx_agg_buf->offset = offset; rx_agg_buf->mapping = mapping; rxbd->rx_bd_haddr = cpu_to_le64(mapping); @@ -1067,11 +1079,11 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx, p5_tpa = true; for (i = 0; i < agg_bufs; i++) { - u16 cons; - struct rx_agg_cmp *agg; struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf; + struct rx_agg_cmp *agg; struct rx_bd *prod_bd; - struct page *page; + netmem_ref netmem; + u16 cons; if (p5_tpa) agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i); @@ -1088,11 +1100,11 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx, cons_rx_buf = &rxr->rx_agg_ring[cons]; /* It is possible for sw_prod to be equal to cons, so - * set cons_rx_buf->page to NULL first. + * set cons_rx_buf->netmem to 0 first. 
*/ - page = cons_rx_buf->page; - cons_rx_buf->page = NULL; - prod_rx_buf->page = page; + netmem = cons_rx_buf->netmem; + cons_rx_buf->netmem = 0; + prod_rx_buf->netmem = netmem; prod_rx_buf->offset = cons_rx_buf->offset; prod_rx_buf->mapping = cons_rx_buf->mapping; @@ -1218,29 +1230,35 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp, return skb; } -static u32 __bnxt_rx_agg_pages(struct bnxt *bp, - struct bnxt_cp_ring_info *cpr, - struct skb_shared_info *shinfo, - u16 idx, u32 agg_bufs, bool tpa, - struct xdp_buff *xdp) +static u32 __bnxt_rx_agg_netmems(struct bnxt *bp, + struct bnxt_cp_ring_info *cpr, + u16 idx, u32 agg_bufs, bool tpa, + struct sk_buff *skb, + struct xdp_buff *xdp) { struct bnxt_napi *bnapi = cpr->bnapi; - struct pci_dev *pdev = bp->pdev; - struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; - u16 prod = rxr->rx_agg_prod; + struct skb_shared_info *shinfo; + struct bnxt_rx_ring_info *rxr; u32 i, total_frag_len = 0; bool p5_tpa = false; + u16 prod; + + rxr = bnapi->rx_ring; + prod = rxr->rx_agg_prod; if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa) p5_tpa = true; + if (skb) + shinfo = skb_shinfo(skb); + else + shinfo = xdp_get_shared_info_from_buff(xdp); + for (i = 0; i < agg_bufs; i++) { - skb_frag_t *frag = &shinfo->frags[i]; - u16 cons, frag_len; - struct rx_agg_cmp *agg; struct bnxt_sw_rx_agg_bd *cons_rx_buf; - struct page *page; - dma_addr_t mapping; + struct rx_agg_cmp *agg; + u16 cons, frag_len; + netmem_ref netmem; if (p5_tpa) agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i); @@ -1251,27 +1269,41 @@ static u32 __bnxt_rx_agg_pages(struct bnxt *bp, RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT; cons_rx_buf = &rxr->rx_agg_ring[cons]; - skb_frag_fill_page_desc(frag, cons_rx_buf->page, - cons_rx_buf->offset, frag_len); - shinfo->nr_frags = i + 1; + if (skb) { + skb_add_rx_frag_netmem(skb, i, cons_rx_buf->netmem, + cons_rx_buf->offset, + frag_len, BNXT_RX_PAGE_SIZE); + } else { + skb_frag_t *frag = &shinfo->frags[i]; + + skb_frag_fill_netmem_desc(frag, cons_rx_buf->netmem, + cons_rx_buf->offset, + frag_len); + shinfo->nr_frags = i + 1; + } __clear_bit(cons, rxr->rx_agg_bmap); - /* It is possible for bnxt_alloc_rx_page() to allocate + /* It is possible for bnxt_alloc_rx_netmem() to allocate * a sw_prod index that equals the cons index, so we * need to clear the cons entry now. */ - mapping = cons_rx_buf->mapping; - page = cons_rx_buf->page; - cons_rx_buf->page = NULL; + netmem = cons_rx_buf->netmem; + cons_rx_buf->netmem = 0; - if (xdp && page_is_pfmemalloc(page)) + if (xdp && netmem_is_pfmemalloc(netmem)) xdp_buff_set_frag_pfmemalloc(xdp); - if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) { + if (bnxt_alloc_rx_netmem(bp, rxr, prod, GFP_ATOMIC) != 0) { + if (skb) { + skb->len -= frag_len; + skb->data_len -= frag_len; + skb->truesize -= BNXT_RX_PAGE_SIZE; + } + --shinfo->nr_frags; - cons_rx_buf->page = page; + cons_rx_buf->netmem = netmem; - /* Update prod since possibly some pages have been + /* Update prod since possibly some netmems have been * allocated already. 
*/ rxr->rx_agg_prod = prod; @@ -1279,8 +1311,8 @@ static u32 __bnxt_rx_agg_pages(struct bnxt *bp, return 0; } - dma_sync_single_for_cpu(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE, - bp->rx_dir); + page_pool_dma_sync_netmem_for_cpu(rxr->page_pool, netmem, 0, + BNXT_RX_PAGE_SIZE); total_frag_len += frag_len; prod = NEXT_RX_AGG(prod); @@ -1289,32 +1321,28 @@ static u32 __bnxt_rx_agg_pages(struct bnxt *bp, return total_frag_len; } -static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp, - struct bnxt_cp_ring_info *cpr, - struct sk_buff *skb, u16 idx, - u32 agg_bufs, bool tpa) +static struct sk_buff *bnxt_rx_agg_netmems_skb(struct bnxt *bp, + struct bnxt_cp_ring_info *cpr, + struct sk_buff *skb, u16 idx, + u32 agg_bufs, bool tpa) { - struct skb_shared_info *shinfo = skb_shinfo(skb); u32 total_frag_len = 0; - total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, idx, - agg_bufs, tpa, NULL); + total_frag_len = __bnxt_rx_agg_netmems(bp, cpr, idx, agg_bufs, tpa, + skb, NULL); if (!total_frag_len) { skb_mark_for_recycle(skb); dev_kfree_skb(skb); return NULL; } - skb->data_len += total_frag_len; - skb->len += total_frag_len; - skb->truesize += BNXT_RX_PAGE_SIZE * agg_bufs; return skb; } -static u32 bnxt_rx_agg_pages_xdp(struct bnxt *bp, - struct bnxt_cp_ring_info *cpr, - struct xdp_buff *xdp, u16 idx, - u32 agg_bufs, bool tpa) +static u32 bnxt_rx_agg_netmems_xdp(struct bnxt *bp, + struct bnxt_cp_ring_info *cpr, + struct xdp_buff *xdp, u16 idx, + u32 agg_bufs, bool tpa) { struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp); u32 total_frag_len = 0; @@ -1322,8 +1350,8 @@ static u32 bnxt_rx_agg_pages_xdp(struct bnxt *bp, if (!xdp_buff_has_frags(xdp)) shinfo->nr_frags = 0; - total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, - idx, agg_bufs, tpa, xdp); + total_frag_len = __bnxt_rx_agg_netmems(bp, cpr, idx, agg_bufs, tpa, + NULL, xdp); if (total_frag_len) { xdp_buff_set_frags_flag(xdp); shinfo->nr_frags = agg_bufs; @@ -1895,7 +1923,8 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, } if (agg_bufs) { - skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true); + skb = bnxt_rx_agg_netmems_skb(bp, cpr, skb, idx, agg_bufs, + true); if (!skb) { /* Page reuse already handled by bnxt_rx_pages(). 
*/ cpr->sw_stats->rx.rx_oom_discards += 1; @@ -2176,9 +2205,10 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, if (bnxt_xdp_attached(bp, rxr)) { bnxt_xdp_buff_init(bp, rxr, cons, data_ptr, len, &xdp); if (agg_bufs) { - u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp, - cp_cons, agg_bufs, - false); + u32 frag_len = bnxt_rx_agg_netmems_xdp(bp, cpr, &xdp, + cp_cons, + agg_bufs, + false); if (!frag_len) goto oom_next_rx; @@ -2230,7 +2260,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, if (agg_bufs) { if (!xdp_active) { - skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false); + skb = bnxt_rx_agg_netmems_skb(bp, cpr, skb, cp_cons, + agg_bufs, false); if (!skb) goto oom_next_rx; } else { @@ -3449,15 +3480,15 @@ static void bnxt_free_one_rx_agg_ring(struct bnxt *bp, struct bnxt_rx_ring_info for (i = 0; i < max_idx; i++) { struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i]; - struct page *page = rx_agg_buf->page; + netmem_ref netmem = rx_agg_buf->netmem; - if (!page) + if (!netmem) continue; - rx_agg_buf->page = NULL; + rx_agg_buf->netmem = 0; __clear_bit(i, rxr->rx_agg_bmap); - page_pool_recycle_direct(rxr->page_pool, page); + page_pool_recycle_direct_netmem(rxr->page_pool, netmem); } } @@ -3750,7 +3781,7 @@ static void bnxt_free_rx_rings(struct bnxt *bp) xdp_rxq_info_unreg(&rxr->xdp_rxq); page_pool_destroy(rxr->page_pool); - if (bnxt_separate_head_pool()) + if (bnxt_separate_head_pool(rxr)) page_pool_destroy(rxr->head_pool); rxr->page_pool = rxr->head_pool = NULL; @@ -3781,15 +3812,19 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp, pp.dev = &bp->pdev->dev; pp.dma_dir = bp->rx_dir; pp.max_len = PAGE_SIZE; - pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; + pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV | + PP_FLAG_ALLOW_UNREADABLE_NETMEM; + pp.queue_idx = rxr->bnapi->index; pool = page_pool_create(&pp); if (IS_ERR(pool)) return PTR_ERR(pool); rxr->page_pool = pool; - if (bnxt_separate_head_pool()) { + rxr->need_head_pool = page_pool_is_unreadable(pool); + if (bnxt_separate_head_pool(rxr)) { pp.pool_size = max(bp->rx_ring_size, 1024); + pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; pool = page_pool_create(&pp); if (IS_ERR(pool)) goto err_destroy_pp; @@ -4201,6 +4236,8 @@ static void bnxt_reset_rx_ring_struct(struct bnxt *bp, rxr->page_pool->p.napi = NULL; rxr->page_pool = NULL; + rxr->head_pool->p.napi = NULL; + rxr->head_pool = NULL; memset(&rxr->xdp_rxq, 0, sizeof(struct xdp_rxq_info)); ring = &rxr->rx_ring_struct; @@ -4325,16 +4362,16 @@ static void bnxt_alloc_one_rx_ring_skb(struct bnxt *bp, rxr->rx_prod = prod; } -static void bnxt_alloc_one_rx_ring_page(struct bnxt *bp, - struct bnxt_rx_ring_info *rxr, - int ring_nr) +static void bnxt_alloc_one_rx_ring_netmem(struct bnxt *bp, + struct bnxt_rx_ring_info *rxr, + int ring_nr) { u32 prod; int i; prod = rxr->rx_agg_prod; for (i = 0; i < bp->rx_agg_ring_size; i++) { - if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) { + if (bnxt_alloc_rx_netmem(bp, rxr, prod, GFP_KERNEL)) { netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d pages only\n", ring_nr, i, bp->rx_ring_size); break; @@ -4375,7 +4412,7 @@ static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr) if (!(bp->flags & BNXT_FLAG_AGG_RINGS)) return 0; - bnxt_alloc_one_rx_ring_page(bp, rxr, ring_nr); + bnxt_alloc_one_rx_ring_netmem(bp, rxr, ring_nr); if (rxr->rx_tpa) { rc = bnxt_alloc_one_tpa_info_data(bp, rxr); @@ -10077,7 +10114,7 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp) struct 
hwrm_ver_get_input *req; u16 fw_maj, fw_min, fw_bld, fw_rsv; u32 dev_caps_cfg, hwrm_ver; - int rc, len; + int rc, len, max_tmo_secs; rc = hwrm_req_init(bp, req, HWRM_VER_GET); if (rc) @@ -10150,9 +10187,14 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp) bp->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000; if (!bp->hwrm_cmd_max_timeout) bp->hwrm_cmd_max_timeout = HWRM_CMD_MAX_TIMEOUT; - else if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT) - netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog\n", - bp->hwrm_cmd_max_timeout / 1000); + max_tmo_secs = bp->hwrm_cmd_max_timeout / 1000; +#ifdef CONFIG_DETECT_HUNG_TASK + if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT || + max_tmo_secs > CONFIG_DEFAULT_HUNG_TASK_TIMEOUT) { + netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog (kernel default %ds)\n", + max_tmo_secs, CONFIG_DEFAULT_HUNG_TASK_TIMEOUT); + } +#endif if (resp->hwrm_intf_maj_8b >= 1) { bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len); @@ -15729,6 +15771,7 @@ static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx) clone->rx_agg_prod = 0; clone->rx_sw_agg_prod = 0; clone->rx_next_cons = 0; + clone->need_head_pool = false; rc = bnxt_alloc_rx_page_pool(bp, clone, rxr->page_pool->p.nid); if (rc) @@ -15771,7 +15814,7 @@ static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx) bnxt_alloc_one_rx_ring_skb(bp, clone, idx); if (bp->flags & BNXT_FLAG_AGG_RINGS) - bnxt_alloc_one_rx_ring_page(bp, clone, idx); + bnxt_alloc_one_rx_ring_netmem(bp, clone, idx); if (bp->flags & BNXT_FLAG_TPA) bnxt_alloc_one_tpa_info_data(bp, clone); @@ -15787,7 +15830,7 @@ err_rxq_info_unreg: xdp_rxq_info_unreg(&clone->xdp_rxq); err_page_pool_destroy: page_pool_destroy(clone->page_pool); - if (bnxt_separate_head_pool()) + if (bnxt_separate_head_pool(clone)) page_pool_destroy(clone->head_pool); clone->page_pool = NULL; clone->head_pool = NULL; @@ -15806,7 +15849,7 @@ static void bnxt_queue_mem_free(struct net_device *dev, void *qmem) xdp_rxq_info_unreg(&rxr->xdp_rxq); page_pool_destroy(rxr->page_pool); - if (bnxt_separate_head_pool()) + if (bnxt_separate_head_pool(rxr)) page_pool_destroy(rxr->head_pool); rxr->page_pool = NULL; rxr->head_pool = NULL; @@ -15897,6 +15940,7 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx) rxr->page_pool = clone->page_pool; rxr->head_pool = clone->head_pool; rxr->xdp_rxq = clone->xdp_rxq; + rxr->need_head_pool = clone->need_head_pool; bnxt_copy_rx_ring(bp, rxr, clone); @@ -15982,7 +16026,7 @@ static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx) bnxt_hwrm_rx_ring_free(bp, rxr, false); bnxt_hwrm_rx_agg_ring_free(bp, rxr, false); page_pool_disable_direct_recycling(rxr->page_pool); - if (bnxt_separate_head_pool()) + if (bnxt_separate_head_pool(rxr)) page_pool_disable_direct_recycling(rxr->head_pool); if (bp->flags & BNXT_FLAG_SHARED_RINGS) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index bc8b3b7e915d..fda0d3cc6227 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -903,7 +903,7 @@ struct bnxt_sw_rx_bd { }; struct bnxt_sw_rx_agg_bd { - struct page *page; + netmem_ref netmem; unsigned int offset; dma_addr_t mapping; }; @@ -1106,6 +1106,7 @@ struct bnxt_rx_ring_info { unsigned long *rx_agg_bmap; u16 rx_agg_bmap_size; + bool need_head_pool; dma_addr_t rx_desc_mapping[MAX_RX_PAGES]; 
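[Editor's aside, not part of the patch] The bnxt hunks above convert the aggregation ring from struct page pointers to netmem_ref handles so the page_pool can also hand out unreadable (device) memory. A short sketch of the netmem helpers the patch relies on, assuming 'pool' was created with PP_FLAG_DMA_MAP (the header locations are my assumption):

/* Sketch of the netmem_ref allocation/recycling pattern used above.
 * page_pool_alloc_netmems(), page_pool_get_dma_addr_netmem() and
 * page_pool_recycle_direct_netmem() are the helpers the patch calls;
 * 0 acts as the "null" netmem_ref.
 */
#include <net/netmem.h>
#include <net/page_pool/helpers.h>

static netmem_ref demo_alloc_agg_buf(struct page_pool *pool,
				     dma_addr_t *mapping)
{
	netmem_ref netmem = page_pool_alloc_netmems(pool, GFP_ATOMIC);

	if (!netmem)
		return 0;

	*mapping = page_pool_get_dma_addr_netmem(netmem);
	return netmem;
}

static void demo_free_agg_buf(struct page_pool *pool, netmem_ref netmem)
{
	page_pool_recycle_direct_netmem(pool, netmem);
}

Because a netmem_ref may reference memory the CPU cannot touch, the patch also keeps a separate readable head_pool whenever page_pool_is_unreadable() reports that the main pool can return such buffers.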
dma_addr_t rx_agg_desc_mapping[MAX_RX_AGG_PAGES]; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c index a000d3f630bd..ce97befd3cb3 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c @@ -506,9 +506,16 @@ err: start_utc, coredump.total_segs + 1, rc); kfree(coredump.data); - *dump_len += sizeof(struct bnxt_coredump_record); - if (rc == -ENOBUFS) + if (!rc) { + *dump_len += sizeof(struct bnxt_coredump_record); + /* The actual coredump length can be smaller than the FW + * reported length earlier. Use the ethtool provided length. + */ + if (buf_len) + *dump_len = buf_len; + } else if (rc == -ENOBUFS) { netdev_err(bp->dev, "Firmware returned large coredump buffer\n"); + } return rc; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h index 15ca51b5d204..fb5f5b063c3d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h @@ -58,7 +58,7 @@ void hwrm_update_token(struct bnxt *bp, u16 seq, enum bnxt_hwrm_wait_state s); #define BNXT_HWRM_MAX_REQ_LEN (bp->hwrm_max_req_len) #define BNXT_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input) -#define HWRM_CMD_MAX_TIMEOUT 40000U +#define HWRM_CMD_MAX_TIMEOUT 60000U #define SHORT_HWRM_CMD_TIMEOUT 20 #define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout) #define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c index 7564705d6478..84c4812414fd 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c @@ -149,7 +149,6 @@ void bnxt_unregister_dev(struct bnxt_en_dev *edev) struct net_device *dev = edev->net; struct bnxt *bp = netdev_priv(dev); struct bnxt_ulp *ulp; - int i = 0; ulp = edev->ulp_tbl; netdev_lock(dev); @@ -165,10 +164,6 @@ void bnxt_unregister_dev(struct bnxt_en_dev *edev) synchronize_rcu(); ulp->max_async_event_id = 0; ulp->async_events_bmap = NULL; - while (atomic_read(&ulp->ref_count) != 0 && i < 10) { - msleep(100); - i++; - } mutex_unlock(&edev->en_dev_lock); netdev_unlock(dev); return; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h index 7fa3b8d1ebd2..7b9dd8ebe4bc 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h @@ -10,9 +10,6 @@ #ifndef BNXT_ULP_H #define BNXT_ULP_H -#define BNXT_ROCE_ULP 0 -#define BNXT_MAX_ULP 1 - #define BNXT_MIN_ROCE_CP_RINGS 2 #define BNXT_MIN_ROCE_STAT_CTXS 1 @@ -50,7 +47,6 @@ struct bnxt_ulp { unsigned long *async_events_bmap; u16 max_async_event_id; u16 msix_requested; - atomic_t ref_count; }; struct bnxt_en_dev { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c index e675611777b5..4a6d8cb9f970 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c @@ -425,9 +425,9 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog) if (prog) { bnxt_set_rx_skb_mode(bp, true); - xdp_features_set_redirect_target(dev, true); + xdp_features_set_redirect_target_locked(dev, true); } else { - xdp_features_clear_redirect_target(dev); + xdp_features_clear_redirect_target_locked(dev); bnxt_set_rx_skb_mode(bp, false); } bp->tx_nr_rings_xdp = tx_xdp; diff --git 
a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 73d78dcb774d..fa0077bc67b7 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -969,12 +969,13 @@ static int bcmgenet_set_pauseparam(struct net_device *dev, /* standard ethtool support functions. */ enum bcmgenet_stat_type { - BCMGENET_STAT_NETDEV = -1, + BCMGENET_STAT_RTNL = -1, BCMGENET_STAT_MIB_RX, BCMGENET_STAT_MIB_TX, BCMGENET_STAT_RUNT, BCMGENET_STAT_MISC, BCMGENET_STAT_SOFT, + BCMGENET_STAT_SOFT64, }; struct bcmgenet_stats { @@ -984,13 +985,15 @@ struct bcmgenet_stats { enum bcmgenet_stat_type type; /* reg offset from UMAC base for misc counters */ u16 reg_offset; + /* sync for u64 stats counters */ + int syncp_offset; }; -#define STAT_NETDEV(m) { \ +#define STAT_RTNL(m) { \ .stat_string = __stringify(m), \ - .stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \ - .stat_offset = offsetof(struct net_device_stats, m), \ - .type = BCMGENET_STAT_NETDEV, \ + .stat_sizeof = sizeof(((struct rtnl_link_stats64 *)0)->m), \ + .stat_offset = offsetof(struct rtnl_link_stats64, m), \ + .type = BCMGENET_STAT_RTNL, \ } #define STAT_GENET_MIB(str, m, _type) { \ @@ -1000,6 +1003,14 @@ struct bcmgenet_stats { .type = _type, \ } +#define STAT_GENET_SOFT_MIB64(str, s, m) { \ + .stat_string = str, \ + .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->s.m), \ + .stat_offset = offsetof(struct bcmgenet_priv, s.m), \ + .type = BCMGENET_STAT_SOFT64, \ + .syncp_offset = offsetof(struct bcmgenet_priv, s.syncp), \ +} + #define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX) #define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX) #define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT) @@ -1014,18 +1025,38 @@ struct bcmgenet_stats { } #define STAT_GENET_Q(num) \ - STAT_GENET_SOFT_MIB("txq" __stringify(num) "_packets", \ - tx_rings[num].packets), \ - STAT_GENET_SOFT_MIB("txq" __stringify(num) "_bytes", \ - tx_rings[num].bytes), \ - STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_bytes", \ - rx_rings[num].bytes), \ - STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_packets", \ - rx_rings[num].packets), \ - STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_errors", \ - rx_rings[num].errors), \ - STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_dropped", \ - rx_rings[num].dropped) + STAT_GENET_SOFT_MIB64("txq" __stringify(num) "_packets", \ + tx_rings[num].stats64, packets), \ + STAT_GENET_SOFT_MIB64("txq" __stringify(num) "_bytes", \ + tx_rings[num].stats64, bytes), \ + STAT_GENET_SOFT_MIB64("txq" __stringify(num) "_errors", \ + tx_rings[num].stats64, errors), \ + STAT_GENET_SOFT_MIB64("txq" __stringify(num) "_dropped", \ + tx_rings[num].stats64, dropped), \ + STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_bytes", \ + rx_rings[num].stats64, bytes), \ + STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_packets", \ + rx_rings[num].stats64, packets), \ + STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_errors", \ + rx_rings[num].stats64, errors), \ + STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_dropped", \ + rx_rings[num].stats64, dropped), \ + STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_multicast", \ + rx_rings[num].stats64, multicast), \ + STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_missed", \ + rx_rings[num].stats64, missed), \ + STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_length_errors", \ + rx_rings[num].stats64, length_errors), \ + STAT_GENET_SOFT_MIB64("rxq" __stringify(num) 
"_over_errors", \ + rx_rings[num].stats64, over_errors), \ + STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_crc_errors", \ + rx_rings[num].stats64, crc_errors), \ + STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_frame_errors", \ + rx_rings[num].stats64, frame_errors), \ + STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_fragmented_errors", \ + rx_rings[num].stats64, fragmented_errors), \ + STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_broadcast", \ + rx_rings[num].stats64, broadcast) /* There is a 0xC gap between the end of RX and beginning of TX stats and then * between the end of TX stats and the beginning of the RX RUNT @@ -1037,15 +1068,20 @@ struct bcmgenet_stats { */ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = { /* general stats */ - STAT_NETDEV(rx_packets), - STAT_NETDEV(tx_packets), - STAT_NETDEV(rx_bytes), - STAT_NETDEV(tx_bytes), - STAT_NETDEV(rx_errors), - STAT_NETDEV(tx_errors), - STAT_NETDEV(rx_dropped), - STAT_NETDEV(tx_dropped), - STAT_NETDEV(multicast), + STAT_RTNL(rx_packets), + STAT_RTNL(tx_packets), + STAT_RTNL(rx_bytes), + STAT_RTNL(tx_bytes), + STAT_RTNL(rx_errors), + STAT_RTNL(tx_errors), + STAT_RTNL(rx_dropped), + STAT_RTNL(tx_dropped), + STAT_RTNL(multicast), + STAT_RTNL(rx_missed_errors), + STAT_RTNL(rx_length_errors), + STAT_RTNL(rx_over_errors), + STAT_RTNL(rx_crc_errors), + STAT_RTNL(rx_frame_errors), /* UniMAC RSV counters */ STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64), STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127), @@ -1133,6 +1169,20 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = { #define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats) +#define BCMGENET_STATS64_ADD(stats, m, v) \ + do { \ + u64_stats_update_begin(&stats->syncp); \ + u64_stats_add(&stats->m, v); \ + u64_stats_update_end(&stats->syncp); \ + } while (0) + +#define BCMGENET_STATS64_INC(stats, m) \ + do { \ + u64_stats_update_begin(&stats->syncp); \ + u64_stats_inc(&stats->m); \ + u64_stats_update_end(&stats->syncp); \ + } while (0) + static void bcmgenet_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { @@ -1216,8 +1266,9 @@ static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv) s = &bcmgenet_gstrings_stats[i]; switch (s->type) { - case BCMGENET_STAT_NETDEV: + case BCMGENET_STAT_RTNL: case BCMGENET_STAT_SOFT: + case BCMGENET_STAT_SOFT64: continue; case BCMGENET_STAT_RUNT: offset += BCMGENET_STAT_OFFSET; @@ -1255,28 +1306,40 @@ static void bcmgenet_get_ethtool_stats(struct net_device *dev, u64 *data) { struct bcmgenet_priv *priv = netdev_priv(dev); + struct rtnl_link_stats64 stats64; + struct u64_stats_sync *syncp; + unsigned int start; int i; if (netif_running(dev)) bcmgenet_update_mib_counters(priv); - dev->netdev_ops->ndo_get_stats(dev); + dev_get_stats(dev, &stats64); for (i = 0; i < BCMGENET_STATS_LEN; i++) { const struct bcmgenet_stats *s; char *p; s = &bcmgenet_gstrings_stats[i]; - if (s->type == BCMGENET_STAT_NETDEV) - p = (char *)&dev->stats; - else - p = (char *)priv; - p += s->stat_offset; - if (sizeof(unsigned long) != sizeof(u32) && - s->stat_sizeof == sizeof(unsigned long)) - data[i] = *(unsigned long *)p; - else - data[i] = *(u32 *)p; + p = (char *)priv; + + if (s->type == BCMGENET_STAT_SOFT64) { + syncp = (struct u64_stats_sync *)(p + s->syncp_offset); + do { + start = u64_stats_fetch_begin(syncp); + data[i] = u64_stats_read((u64_stats_t *)(p + s->stat_offset)); + } while (u64_stats_fetch_retry(syncp, start)); + } else { + if (s->type == BCMGENET_STAT_RTNL) + p = (char *)&stats64; 
+ + p += s->stat_offset; + if (sizeof(unsigned long) != sizeof(u32) && + s->stat_sizeof == sizeof(unsigned long)) + data[i] = *(unsigned long *)p; + else + data[i] = *(u32 *)p; + } } } @@ -1856,6 +1919,7 @@ static struct sk_buff *bcmgenet_free_rx_cb(struct device *dev, static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev, struct bcmgenet_tx_ring *ring) { + struct bcmgenet_tx_stats64 *stats = &ring->stats64; struct bcmgenet_priv *priv = netdev_priv(dev); unsigned int txbds_processed = 0; unsigned int bytes_compl = 0; @@ -1896,8 +1960,10 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev, ring->free_bds += txbds_processed; ring->c_index = c_index; - ring->packets += pkts_compl; - ring->bytes += bytes_compl; + u64_stats_update_begin(&stats->syncp); + u64_stats_add(&stats->packets, pkts_compl); + u64_stats_add(&stats->bytes, bytes_compl); + u64_stats_update_end(&stats->syncp); netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->index), pkts_compl, bytes_compl); @@ -1983,8 +2049,10 @@ static void bcmgenet_tx_reclaim_all(struct net_device *dev) * the transmit checksum offsets in the descriptors */ static struct sk_buff *bcmgenet_add_tsb(struct net_device *dev, - struct sk_buff *skb) + struct sk_buff *skb, + struct bcmgenet_tx_ring *ring) { + struct bcmgenet_tx_stats64 *stats = &ring->stats64; struct bcmgenet_priv *priv = netdev_priv(dev); struct status_64 *status = NULL; struct sk_buff *new_skb; @@ -2001,7 +2069,7 @@ static struct sk_buff *bcmgenet_add_tsb(struct net_device *dev, if (!new_skb) { dev_kfree_skb_any(skb); priv->mib.tx_realloc_tsb_failed++; - dev->stats.tx_dropped++; + BCMGENET_STATS64_INC(stats, dropped); return NULL; } dev_consume_skb_any(skb); @@ -2089,7 +2157,7 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) GENET_CB(skb)->bytes_sent = skb->len; /* add the Transmit Status Block */ - skb = bcmgenet_add_tsb(dev, skb); + skb = bcmgenet_add_tsb(dev, skb, ring); if (!skb) { ret = NETDEV_TX_OK; goto out; @@ -2231,6 +2299,7 @@ static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv, static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring, unsigned int budget) { + struct bcmgenet_rx_stats64 *stats = &ring->stats64; struct bcmgenet_priv *priv = ring->priv; struct net_device *dev = priv->dev; struct enet_cb *cb; @@ -2253,7 +2322,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring, DMA_P_INDEX_DISCARD_CNT_MASK; if (discards > ring->old_discards) { discards = discards - ring->old_discards; - ring->errors += discards; + BCMGENET_STATS64_ADD(stats, missed, discards); ring->old_discards += discards; /* Clear HW register when we reach 75% of maximum 0xFFFF */ @@ -2279,7 +2348,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring, skb = bcmgenet_rx_refill(priv, cb); if (unlikely(!skb)) { - ring->dropped++; + BCMGENET_STATS64_INC(stats, dropped); goto next; } @@ -2306,8 +2375,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring, if (unlikely(len > RX_BUF_LENGTH)) { netif_err(priv, rx_status, dev, "oversized packet\n"); - dev->stats.rx_length_errors++; - dev->stats.rx_errors++; + BCMGENET_STATS64_INC(stats, length_errors); dev_kfree_skb_any(skb); goto next; } @@ -2315,7 +2383,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring, if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) { netif_err(priv, rx_status, dev, "dropping fragmented packet!\n"); - ring->errors++; + BCMGENET_STATS64_INC(stats, fragmented_errors); 
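/* Editor's note: a minimal, self-contained sketch of the u64_stats pattern
 * that the bcmgenet conversion above relies on.  The type and field names
 * here are illustrative only (the driver's real types are
 * bcmgenet_rx_stats64 / bcmgenet_tx_stats64); the API calls are the stock
 * <linux/u64_stats_sync.h> ones.  The syncp must be initialised once with
 * u64_stats_init() before use.
 */
#include <linux/u64_stats_sync.h>

struct ring_stats_sketch {
	struct u64_stats_sync syncp;
	u64_stats_t packets;
	u64_stats_t bytes;
};

/* writer side: called from a single producer context (NAPI poll / xmit) */
static void ring_stats_sketch_update(struct ring_stats_sketch *s,
				     unsigned int len)
{
	u64_stats_update_begin(&s->syncp);
	u64_stats_inc(&s->packets);
	u64_stats_add(&s->bytes, len);
	u64_stats_update_end(&s->syncp);
}

/* reader side: ethtool stats or ndo_get_stats64, retried if a writer raced */
static void ring_stats_sketch_read(struct ring_stats_sketch *s,
				   u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		*packets = u64_stats_read(&s->packets);
		*bytes = u64_stats_read(&s->bytes);
	} while (u64_stats_fetch_retry(&s->syncp, start));
}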
dev_kfree_skb_any(skb); goto next; } @@ -2328,15 +2396,22 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring, DMA_RX_RXER))) { netif_err(priv, rx_status, dev, "dma_flag=0x%x\n", (unsigned int)dma_flag); + u64_stats_update_begin(&stats->syncp); if (dma_flag & DMA_RX_CRC_ERROR) - dev->stats.rx_crc_errors++; + u64_stats_inc(&stats->crc_errors); if (dma_flag & DMA_RX_OV) - dev->stats.rx_over_errors++; + u64_stats_inc(&stats->over_errors); if (dma_flag & DMA_RX_NO) - dev->stats.rx_frame_errors++; + u64_stats_inc(&stats->frame_errors); if (dma_flag & DMA_RX_LG) - dev->stats.rx_length_errors++; - dev->stats.rx_errors++; + u64_stats_inc(&stats->length_errors); + if ((dma_flag & (DMA_RX_CRC_ERROR | + DMA_RX_OV | + DMA_RX_NO | + DMA_RX_LG | + DMA_RX_RXER)) == DMA_RX_RXER) + u64_stats_inc(&stats->errors); + u64_stats_update_end(&stats->syncp); dev_kfree_skb_any(skb); goto next; } /* error packet */ @@ -2356,10 +2431,15 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring, /*Finish setting up the received SKB and send it to the kernel*/ skb->protocol = eth_type_trans(skb, priv->dev); - ring->packets++; - ring->bytes += len; + + u64_stats_update_begin(&stats->syncp); + u64_stats_inc(&stats->packets); + u64_stats_add(&stats->bytes, len); if (dma_flag & DMA_RX_MULT) - dev->stats.multicast++; + u64_stats_inc(&stats->multicast); + else if (dma_flag & DMA_RX_BRDCAST) + u64_stats_inc(&stats->broadcast); + u64_stats_update_end(&stats->syncp); /* Notify kernel */ napi_gro_receive(&ring->napi, skb); @@ -3420,7 +3500,7 @@ static void bcmgenet_timeout(struct net_device *dev, unsigned int txqueue) netif_trans_update(dev); - dev->stats.tx_errors++; + BCMGENET_STATS64_INC((&priv->tx_rings[txqueue].stats64), errors); netif_tx_wake_all_queues(dev); } @@ -3509,39 +3589,72 @@ static int bcmgenet_set_mac_addr(struct net_device *dev, void *p) return 0; } -static struct net_device_stats *bcmgenet_get_stats(struct net_device *dev) +static void bcmgenet_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) { struct bcmgenet_priv *priv = netdev_priv(dev); - unsigned long tx_bytes = 0, tx_packets = 0; - unsigned long rx_bytes = 0, rx_packets = 0; - unsigned long rx_errors = 0, rx_dropped = 0; - struct bcmgenet_tx_ring *tx_ring; - struct bcmgenet_rx_ring *rx_ring; + struct bcmgenet_tx_stats64 *tx_stats; + struct bcmgenet_rx_stats64 *rx_stats; + u64 rx_length_errors, rx_over_errors; + u64 rx_missed, rx_fragmented_errors; + u64 rx_crc_errors, rx_frame_errors; + u64 tx_errors, tx_dropped; + u64 rx_errors, rx_dropped; + u64 tx_bytes, tx_packets; + u64 rx_bytes, rx_packets; + unsigned int start; unsigned int q; + u64 multicast; for (q = 0; q <= priv->hw_params->tx_queues; q++) { - tx_ring = &priv->tx_rings[q]; - tx_bytes += tx_ring->bytes; - tx_packets += tx_ring->packets; + tx_stats = &priv->tx_rings[q].stats64; + do { + start = u64_stats_fetch_begin(&tx_stats->syncp); + tx_bytes = u64_stats_read(&tx_stats->bytes); + tx_packets = u64_stats_read(&tx_stats->packets); + tx_errors = u64_stats_read(&tx_stats->errors); + tx_dropped = u64_stats_read(&tx_stats->dropped); + } while (u64_stats_fetch_retry(&tx_stats->syncp, start)); + + stats->tx_bytes += tx_bytes; + stats->tx_packets += tx_packets; + stats->tx_errors += tx_errors; + stats->tx_dropped += tx_dropped; } for (q = 0; q <= priv->hw_params->rx_queues; q++) { - rx_ring = &priv->rx_rings[q]; - - rx_bytes += rx_ring->bytes; - rx_packets += rx_ring->packets; - rx_errors += rx_ring->errors; - rx_dropped += rx_ring->dropped; + rx_stats 
= &priv->rx_rings[q].stats64; + do { + start = u64_stats_fetch_begin(&rx_stats->syncp); + rx_bytes = u64_stats_read(&rx_stats->bytes); + rx_packets = u64_stats_read(&rx_stats->packets); + rx_errors = u64_stats_read(&rx_stats->errors); + rx_dropped = u64_stats_read(&rx_stats->dropped); + rx_missed = u64_stats_read(&rx_stats->missed); + rx_length_errors = u64_stats_read(&rx_stats->length_errors); + rx_over_errors = u64_stats_read(&rx_stats->over_errors); + rx_crc_errors = u64_stats_read(&rx_stats->crc_errors); + rx_frame_errors = u64_stats_read(&rx_stats->frame_errors); + rx_fragmented_errors = u64_stats_read(&rx_stats->fragmented_errors); + multicast = u64_stats_read(&rx_stats->multicast); + } while (u64_stats_fetch_retry(&rx_stats->syncp, start)); + + rx_errors += rx_length_errors; + rx_errors += rx_crc_errors; + rx_errors += rx_frame_errors; + rx_errors += rx_fragmented_errors; + + stats->rx_bytes += rx_bytes; + stats->rx_packets += rx_packets; + stats->rx_errors += rx_errors; + stats->rx_dropped += rx_dropped; + stats->rx_missed_errors += rx_missed; + stats->rx_length_errors += rx_length_errors; + stats->rx_over_errors += rx_over_errors; + stats->rx_crc_errors += rx_crc_errors; + stats->rx_frame_errors += rx_frame_errors; + stats->multicast += multicast; } - - dev->stats.tx_bytes = tx_bytes; - dev->stats.tx_packets = tx_packets; - dev->stats.rx_bytes = rx_bytes; - dev->stats.rx_packets = rx_packets; - dev->stats.rx_errors = rx_errors; - dev->stats.rx_missed_errors = rx_errors; - dev->stats.rx_dropped = rx_dropped; - return &dev->stats; } static int bcmgenet_change_carrier(struct net_device *dev, bool new_carrier) @@ -3569,7 +3682,7 @@ static const struct net_device_ops bcmgenet_netdev_ops = { .ndo_set_mac_address = bcmgenet_set_mac_addr, .ndo_eth_ioctl = phy_do_ioctl_running, .ndo_set_features = bcmgenet_set_features, - .ndo_get_stats = bcmgenet_get_stats, + .ndo_get_stats64 = bcmgenet_get_stats64, .ndo_change_carrier = bcmgenet_change_carrier, }; diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index 10c631bbe964..5ec3979779ec 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h @@ -155,6 +155,30 @@ struct bcmgenet_mib_counters { u32 tx_realloc_tsb_failed; }; +struct bcmgenet_tx_stats64 { + struct u64_stats_sync syncp; + u64_stats_t packets; + u64_stats_t bytes; + u64_stats_t errors; + u64_stats_t dropped; +}; + +struct bcmgenet_rx_stats64 { + struct u64_stats_sync syncp; + u64_stats_t bytes; + u64_stats_t packets; + u64_stats_t errors; + u64_stats_t dropped; + u64_stats_t multicast; + u64_stats_t broadcast; + u64_stats_t missed; + u64_stats_t length_errors; + u64_stats_t over_errors; + u64_stats_t crc_errors; + u64_stats_t frame_errors; + u64_stats_t fragmented_errors; +}; + #define UMAC_MIB_START 0x400 #define UMAC_MDIO_CMD 0x614 @@ -515,8 +539,7 @@ struct bcmgenet_skb_cb { struct bcmgenet_tx_ring { spinlock_t lock; /* ring lock */ struct napi_struct napi; /* NAPI per tx queue */ - unsigned long packets; - unsigned long bytes; + struct bcmgenet_tx_stats64 stats64; unsigned int index; /* ring index */ struct enet_cb *cbs; /* tx ring buffer control block*/ unsigned int size; /* size of each tx ring */ @@ -540,10 +563,7 @@ struct bcmgenet_net_dim { struct bcmgenet_rx_ring { struct napi_struct napi; /* Rx NAPI struct */ - unsigned long bytes; - unsigned long packets; - unsigned long errors; - unsigned long dropped; + struct bcmgenet_rx_stats64 stats64; unsigned int 
index; /* Rx ring index */ struct enet_cb *cbs; /* Rx ring buffer control block */ unsigned int size; /* Rx ring size */ diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 71c619d2bea5..b6437ba7a2eb 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -625,7 +625,7 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv) .asym_pause = 0, }; - phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL); + phydev = fixed_phy_register(&fphy_status, NULL); if (IS_ERR(phydev)) { dev_err(kdev, "failed to register fixed PHY device\n"); return PTR_ERR(phydev); diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index d1f541af4e3b..ff47e96b9124 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -54,7 +54,7 @@ #include <linux/ssb/ssb_driver_gige.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> -#include <linux/crc32poly.h> +#include <linux/crc32.h> #include <linux/dmi.h> #include <net/checksum.h> @@ -9809,26 +9809,7 @@ static void tg3_setup_rxbd_thresholds(struct tg3 *tp) static inline u32 calc_crc(unsigned char *buf, int len) { - u32 reg; - u32 tmp; - int j, k; - - reg = 0xffffffff; - - for (j = 0; j < len; j++) { - reg ^= buf[j]; - - for (k = 0; k < 8; k++) { - tmp = reg & 0x01; - - reg >>= 1; - - if (tmp) - reg ^= CRC32_POLY_LE; - } - } - - return ~reg; + return ~crc32(~0, buf, len); } static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all) diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index a03eff3d4425..50eb54ecf1ba 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -1735,7 +1735,7 @@ bnad_iocpf_sem_timeout(struct timer_list *t) * Time CPU m CPU n * 0 1 = test_bit * 1 clear_bit - * 2 del_timer_sync + * 2 timer_delete_sync * 3 mod_timer */ diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index e1e8bd2ec155..d1f1ae5ea161 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -5283,7 +5283,11 @@ static int macb_probe(struct platform_device *pdev) #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) { - dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44)); + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44)); + if (err) { + dev_err(&pdev->dev, "failed to set DMA mask\n"); + goto err_out_free_netdev; + } bp->hw_dma_cap |= HW_DMA_CAP_64B; } #endif diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index 06397cc8bb36..5211759bfe47 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c @@ -1389,11 +1389,9 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry, this_cpu_inc(nic->pnicvf->drv_stats->tx_tso); } - /* Check if timestamp is requested */ - if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { - skb_tx_timestamp(skb); + /* Check if hw timestamp is requested */ + if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) return; - } /* Tx timestamping not supported along with TSO, so ignore request */ if (skb_shinfo(skb)->gso_size) @@ -1472,6 +1470,8 @@ static inline void nicvf_sq_doorbell(struct nicvf *nic, struct sk_buff *skb, netdev_tx_sent_queue(txq, skb->len); + skb_tx_timestamp(skb); + /* 
make sure all memory stores are done before ringing doorbell */ smp_wmb(); diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index 608cc6af5af1..3b7ad744b2dd 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -1605,10 +1605,10 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return dev_err_probe(dev, err, "Failed to enable PCI device\n"); } - err = pci_request_regions(pdev, DRV_NAME); + err = pcim_request_all_regions(pdev, DRV_NAME); if (err) { dev_err(dev, "PCI request regions failed 0x%x\n", err); - goto err_disable_device; + goto err_zero_drv_data; } /* MAP configuration registers */ @@ -1616,7 +1616,7 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (!bgx->reg_base) { dev_err(dev, "BGX: Cannot map CSR memory space, aborting\n"); err = -ENOMEM; - goto err_release_regions; + goto err_zero_drv_data; } set_max_bgx_per_node(pdev); @@ -1688,10 +1688,7 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err_enable: bgx_vnic[bgx->bgx_id] = NULL; pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx); -err_release_regions: - pci_release_regions(pdev); -err_disable_device: - pci_disable_device(pdev); +err_zero_drv_data: pci_set_drvdata(pdev, NULL); return err; } @@ -1710,8 +1707,6 @@ static void bgx_remove(struct pci_dev *pdev) pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx); bgx_vnic[bgx->bgx_id] = NULL; - pci_release_regions(pdev); - pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); } diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c index 3b7068832f95..4a0e2d2eb60a 100644 --- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c +++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c @@ -351,7 +351,7 @@ static void set_msglevel(struct net_device *dev, u32 val) adapter->msg_enable = val; } -static const char stats_strings[][ETH_GSTRING_LEN] = { +static const char stats_strings[][ETH_GSTRING_LEN] __nonstring_array = { "TxOctetsOK", "TxOctetsBad", "TxUnicastFramesOK", diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 551c279dc14b..51395c96b2e9 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -6480,10 +6480,11 @@ static const struct tlsdev_ops cxgb4_ktls_ops = { #if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE) -static int cxgb4_xfrm_add_state(struct xfrm_state *x, +static int cxgb4_xfrm_add_state(struct net_device *dev, + struct xfrm_state *x, struct netlink_ext_ack *extack) { - struct adapter *adap = netdev2adap(x->xso.dev); + struct adapter *adap = netdev2adap(dev); int ret; if (!mutex_trylock(&uld_mutex)) { @@ -6494,7 +6495,8 @@ static int cxgb4_xfrm_add_state(struct xfrm_state *x, if (ret) goto out_unlock; - ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_add(x, extack); + ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_add(dev, x, + extack); out_unlock: mutex_unlock(&uld_mutex); @@ -6502,9 +6504,9 @@ out_unlock: return ret; } -static void cxgb4_xfrm_del_state(struct xfrm_state *x) +static void cxgb4_xfrm_del_state(struct net_device *dev, struct xfrm_state *x) { - struct adapter *adap = netdev2adap(x->xso.dev); + struct adapter *adap = netdev2adap(dev); if (!mutex_trylock(&uld_mutex)) { dev_dbg(adap->pdev_dev, @@ -6514,15 +6516,15 @@ static void cxgb4_xfrm_del_state(struct xfrm_state *x) if 
(chcr_offload_state(adap, CXGB4_XFRMDEV_OPS)) goto out_unlock; - adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_delete(x); + adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_delete(dev, x); out_unlock: mutex_unlock(&uld_mutex); } -static void cxgb4_xfrm_free_state(struct xfrm_state *x) +static void cxgb4_xfrm_free_state(struct net_device *dev, struct xfrm_state *x) { - struct adapter *adap = netdev2adap(x->xso.dev); + struct adapter *adap = netdev2adap(dev); if (!mutex_trylock(&uld_mutex)) { dev_dbg(adap->pdev_dev, @@ -6532,7 +6534,7 @@ static void cxgb4_xfrm_free_state(struct xfrm_state *x) if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS)) goto out_unlock; - adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_free(x); + adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_free(dev, x); out_unlock: mutex_unlock(&uld_mutex); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c index b08356060fb4..7bab8da8f6e6 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c @@ -29,7 +29,7 @@ static int cxgb4_thermal_get_temp(struct thermal_zone_device *tzdev, return 0; } -static struct thermal_zone_device_ops cxgb4_thermal_ops = { +static const struct thermal_zone_device_ops cxgb4_thermal_ops = { .get_temp = cxgb4_thermal_get_temp, }; diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index f991a28a71c3..f2d533acb056 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -1533,7 +1533,6 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev) } else { q = &adap->sge.ethtxq[qidx + pi->first_qset]; } - skb_tx_timestamp(skb); reclaim_completed_tx(adap, &q->q, -1, true); cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F; @@ -1706,6 +1705,8 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev) cpl->len = htons(skb->len); cpl->ctrl1 = cpu_to_be64(cntrl); + skb_tx_timestamp(skb); + if (immediate) { cxgb4_inline_tx_skb(skb, &q->q, sgl); dev_consume_skb_any(skb); @@ -2268,7 +2269,6 @@ static int ethofld_hard_xmit(struct net_device *dev, d = &eosw_txq->desc[eosw_txq->last_pidx]; skb = d->skb; - skb_tx_timestamp(skb); wr = (struct fw_eth_tx_eo_wr *)&eohw_txq->q.desc[eohw_txq->q.pidx]; if (unlikely(eosw_txq->state != CXGB4_EO_STATE_ACTIVE && @@ -2373,6 +2373,7 @@ write_wr_headers: eohw_txq->vlan_ins++; txq_advance(&eohw_txq->q, ndesc); + skb_tx_timestamp(skb); cxgb4_ring_tx_db(adap, &eohw_txq->q, ndesc); eosw_txq_advance_index(&eosw_txq->last_pidx, 1, eosw_txq->ndesc); diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c index baba96883f48..ecd9a0bd5e18 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c +++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c @@ -75,9 +75,12 @@ static int ch_ipsec_uld_state_change(void *handle, enum cxgb4_state new_state); static int ch_ipsec_xmit(struct sk_buff *skb, struct net_device *dev); static void *ch_ipsec_uld_add(const struct cxgb4_lld_info *infop); static void ch_ipsec_advance_esn_state(struct xfrm_state *x); -static void ch_ipsec_xfrm_free_state(struct xfrm_state *x); -static void ch_ipsec_xfrm_del_state(struct xfrm_state *x); -static int ch_ipsec_xfrm_add_state(struct xfrm_state *x, +static void ch_ipsec_xfrm_free_state(struct net_device *dev, + struct 
xfrm_state *x); +static void ch_ipsec_xfrm_del_state(struct net_device *dev, + struct xfrm_state *x); +static int ch_ipsec_xfrm_add_state(struct net_device *dev, + struct xfrm_state *x, struct netlink_ext_ack *extack); static const struct xfrmdev_ops ch_ipsec_xfrmdev_ops = { @@ -223,7 +226,8 @@ out: * returns 0 on success, negative error if failed to send message to FPGA * positive error if FPGA returned a bad response */ -static int ch_ipsec_xfrm_add_state(struct xfrm_state *x, +static int ch_ipsec_xfrm_add_state(struct net_device *dev, + struct xfrm_state *x, struct netlink_ext_ack *extack) { struct ipsec_sa_entry *sa_entry; @@ -302,14 +306,16 @@ out: return res; } -static void ch_ipsec_xfrm_del_state(struct xfrm_state *x) +static void ch_ipsec_xfrm_del_state(struct net_device *dev, + struct xfrm_state *x) { /* do nothing */ if (!x->xso.offload_handle) return; } -static void ch_ipsec_xfrm_free_state(struct xfrm_state *x) +static void ch_ipsec_xfrm_free_state(struct net_device *dev, + struct xfrm_state *x) { struct ipsec_sa_entry *sa_entry; diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c index e8e460a92e0e..4e2096e49684 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c +++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c @@ -1640,6 +1640,7 @@ static int chcr_ktls_tunnel_pkt(struct chcr_ktls_info *tx_info, cxgb4_write_sgl(skb, &q->q, pos, end, 0, sgl_sdesc->addr); sgl_sdesc->skb = skb; chcr_txq_advance(&q->q, ndesc); + skb_tx_timestamp(skb); cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc); return 0; } @@ -1903,7 +1904,6 @@ static int chcr_ktls_sw_fallback(struct sk_buff *skb, th = tcp_hdr(nskb); skb_offset = skb_tcp_all_headers(nskb); data_len = nskb->len - skb_offset; - skb_tx_timestamp(nskb); if (chcr_ktls_tunnel_pkt(tx_info, nskb, q)) goto out; diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h index 9c12e967e9f1..301b3f3114af 100644 --- a/drivers/net/ethernet/cisco/enic/enic.h +++ b/drivers/net/ethernet/cisco/enic/enic.h @@ -26,6 +26,7 @@ #define ENIC_WQ_MAX 256 #define ENIC_RQ_MAX 256 +#define ENIC_RQ_MIN_DEFAULT 8 #define ENIC_WQ_NAPI_BUDGET 256 diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index c753c35b26eb..6ef8a0d90bce 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -2296,7 +2296,8 @@ static int enic_adjust_resources(struct enic *enic) * used based on which resource is the most constrained */ wq_avail = min(enic->wq_avail, ENIC_WQ_MAX); - rq_default = netif_get_num_default_rss_queues(); + rq_default = max(netif_get_num_default_rss_queues(), + ENIC_RQ_MIN_DEFAULT); rq_avail = min3(enic->rq_avail, ENIC_RQ_MAX, rq_default); max_queues = min(enic->cq_avail, enic->intr_avail - ENIC_MSIX_RESERVED_INTR); diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index 517a15904fb0..6a2004bbe87f 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c @@ -1144,6 +1144,7 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb, struct gmac_txdesc *txd; skb_frag_t *skb_frag; dma_addr_t mapping; + bool tcp = false; void *buffer; u16 mss; int ret; @@ -1151,6 +1152,13 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb, word1 = skb->len; word3 = SOF_BIT; + /* Determine if we are doing TCP 
*/ + if (skb->protocol == htons(ETH_P_IP)) + tcp = (ip_hdr(skb)->protocol == IPPROTO_TCP); + else + /* IPv6 */ + tcp = (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP); + mss = skb_shinfo(skb)->gso_size; if (mss) { /* This means we are dealing with TCP and skb->len is the @@ -1163,8 +1171,26 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb, mss, skb->len); word1 |= TSS_MTU_ENABLE_BIT; word3 |= mss; + } else if (tcp) { + /* Even if we are not using TSO, use the hardware offloader + * for transferring the TCP frame: this hardware has partial + * TCP awareness (called TOE - TCP Offload Engine) and will + * according to the datasheet put packets belonging to the + * same TCP connection in the same queue for the TOE/TSO + * engine to process. The engine will deal with chopping + * up frames that exceed ETH_DATA_LEN which the + * checksumming engine cannot handle (see below) into + * manageable chunks. It flawlessly deals with quite big + * frames and frames containing custom DSA EtherTypes. + */ + mss = netdev->mtu + skb_tcp_all_headers(skb); + mss = min(mss, skb->len); + netdev_dbg(netdev, "TOE/TSO len %04x mtu %04x mss %04x\n", + skb->len, netdev->mtu, mss); + word1 |= TSS_MTU_ENABLE_BIT; + word3 |= mss; } else if (skb->len >= ETH_FRAME_LEN) { - /* Hardware offloaded checksumming isn't working on frames + /* Hardware offloaded checksumming isn't working on non-TCP frames * bigger than 1514 bytes. A hypothesis about this is that the * checksum buffer is only 1518 bytes, so when the frames get * bigger they get truncated, or the last few bytes get @@ -1181,21 +1207,16 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb, } if (skb->ip_summed == CHECKSUM_PARTIAL) { - int tcp = 0; - /* We do not switch off the checksumming on non TCP/UDP * frames: as is shown from tests, the checksumming engine * is smart enough to see that a frame is not actually TCP * or UDP and then just pass it through without any changes * to the frame. */ - if (skb->protocol == htons(ETH_P_IP)) { + if (skb->protocol == htons(ETH_P_IP)) word1 |= TSS_IP_CHKSUM_BIT; - tcp = ip_hdr(skb)->protocol == IPPROTO_TCP; - } else { /* IPv6 */ + else word1 |= TSS_IPV6_ENABLE_BIT; - tcp = ipv6_hdr(skb)->nexthdr == IPPROTO_TCP; - } word1 |= tcp ? 
TSS_TCP_CHKSUM_BIT : TSS_UDP_CHKSUM_BIT; } diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c index c8c53121557f..bec76e7bf5dd 100644 --- a/drivers/net/ethernet/dec/tulip/tulip_core.c +++ b/drivers/net/ethernet/dec/tulip/tulip_core.c @@ -1411,7 +1411,7 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* grab all resources from both PIO and MMIO regions, as we * don't want anyone else messing around with our hardware */ - if (pci_request_regions(pdev, DRV_NAME)) + if (pcim_request_all_regions(pdev, DRV_NAME)) return -ENODEV; ioaddr = pcim_iomap(pdev, TULIP_BAR, tulip_tbl[chip_idx].io_size); diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c index 5930cdec6f2f..e593273b2867 100644 --- a/drivers/net/ethernet/dec/tulip/winbond-840.c +++ b/drivers/net/ethernet/dec/tulip/winbond-840.c @@ -375,7 +375,7 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent) return -ENOMEM; SET_NETDEV_DEV(dev, &pdev->dev); - if (pci_request_regions(pdev, DRV_NAME)) + if (pcim_request_all_regions(pdev, DRV_NAME)) goto err_out_netdev; ioaddr = pci_iomap(pdev, TULIP_BAR, netdev_res_size); diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c index 232e839a9d07..038a0400c1f9 100644 --- a/drivers/net/ethernet/dlink/dl2k.c +++ b/drivers/net/ethernet/dlink/dl2k.c @@ -146,6 +146,8 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent) np->ioaddr = ioaddr; np->chip_id = chip_idx; np->pdev = pdev; + + spin_lock_init(&np->stats_lock); spin_lock_init (&np->tx_lock); spin_lock_init (&np->rx_lock); @@ -865,7 +867,6 @@ tx_error (struct net_device *dev, int tx_status) frame_id = (tx_status & 0xffff0000); printk (KERN_ERR "%s: Transmit error, TxStatus %4.4x, FrameId %d.\n", dev->name, tx_status, frame_id); - dev->stats.tx_errors++; /* Ttransmit Underrun */ if (tx_status & 0x10) { dev->stats.tx_fifo_errors++; @@ -902,9 +903,15 @@ tx_error (struct net_device *dev, int tx_status) rio_set_led_mode(dev); /* Let TxStartThresh stay default value */ } + + spin_lock(&np->stats_lock); /* Maximum Collisions */ if (tx_status & 0x08) dev->stats.collisions++; + + dev->stats.tx_errors++; + spin_unlock(&np->stats_lock); + /* Restart the Tx */ dw32(MACCtrl, dr16(MACCtrl) | TxEnable); } @@ -1073,7 +1080,9 @@ get_stats (struct net_device *dev) int i; #endif unsigned int stat_reg; + unsigned long flags; + spin_lock_irqsave(&np->stats_lock, flags); /* All statistics registers need to be acknowledged, else statistic overflow could cause problems */ @@ -1123,6 +1132,9 @@ get_stats (struct net_device *dev) dr16(TCPCheckSumErrors); dr16(UDPCheckSumErrors); dr16(IPCheckSumErrors); + + spin_unlock_irqrestore(&np->stats_lock, flags); + return &dev->stats; } diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h index 0e33e2eaae96..ba679025e866 100644 --- a/drivers/net/ethernet/dlink/dl2k.h +++ b/drivers/net/ethernet/dlink/dl2k.h @@ -329,18 +329,18 @@ enum _pcs_anlpar { }; typedef struct t_SROM { - u16 config_param; /* 0x00 */ - u16 asic_ctrl; /* 0x02 */ - u16 sub_vendor_id; /* 0x04 */ - u16 sub_system_id; /* 0x06 */ - u16 pci_base_1; /* 0x08 (IP1000A only) */ - u16 pci_base_2; /* 0x0a (IP1000A only) */ + __le16 config_param; /* 0x00 */ + __le16 asic_ctrl; /* 0x02 */ + __le16 sub_vendor_id; /* 0x04 */ + __le16 sub_system_id; /* 0x06 */ + __le16 pci_base_1; /* 0x08 (IP1000A only) */ + __le16 pci_base_2; /* 0x0a (IP1000A only) 
*/ __le16 led_mode; /* 0x0c (IP1000A only) */ - u16 reserved1[9]; /* 0x0e-0x1f */ + __le16 reserved1[9]; /* 0x0e-0x1f */ u8 mac_addr[6]; /* 0x20-0x25 */ u8 reserved2[10]; /* 0x26-0x2f */ u8 sib[204]; /* 0x30-0xfb */ - u32 crc; /* 0xfc-0xff */ + __le32 crc; /* 0xfc-0xff */ } SROM_t, *PSROM_t; /* Ioctl custom data */ @@ -372,6 +372,8 @@ struct netdev_private { struct pci_dev *pdev; void __iomem *ioaddr; void __iomem *eeprom_addr; + // To ensure synchronization when stats are updated. + spinlock_t stats_lock; spinlock_t tx_lock; spinlock_t rx_lock; unsigned int rx_buf_sz; /* Based on MTU+slack. */ diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 51b8377edd1d..d730af4a50c7 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -1609,7 +1609,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd) /* version 1 of the cmd is not supported only by BE2 */ if (BE2_chip(adapter)) hdr->version = 0; - if (BE3_chip(adapter) || lancer_chip(adapter)) + else if (BE3_chip(adapter) || lancer_chip(adapter)) hdr->version = 1; else hdr->version = 2; @@ -2615,7 +2615,11 @@ err: return status; } -static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "}; +/* + * Since the cookie is text, add a parsing-skipped space to keep it from + * ever being matched on storage holding this source file. + */ +static const char flash_cookie[32] __nonstring = "*** SE FLAS" "H DIRECTORY *** "; static bool phy_flashing_required(struct be_adapter *adapter) { diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index d70818f06be7..5e2d3ddb5d43 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -1415,7 +1415,7 @@ struct flash_section_entry { } __packed; struct flash_section_info { - u8 cookie[32]; + u8 cookie[32] __nonstring; struct flash_section_hdr fsec_hdr; struct flash_section_entry fsec_entry[32]; } __packed; diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index 17ec35e75a65..a98d5af3f9e3 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -1906,7 +1906,7 @@ static int ftgmac100_probe(struct platform_device *pdev) goto err_phy_connect; } - phydev = fixed_phy_register(PHY_POLL, &ncsi_phy_status, np); + phydev = fixed_phy_register(&ncsi_phy_status, np); if (IS_ERR(phydev)) { dev_err(&pdev->dev, "failed to register fixed PHY device\n"); err = PTR_ERR(phydev); diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig index a2d7300925a8..bbef47c3480c 100644 --- a/drivers/net/ethernet/freescale/Kconfig +++ b/drivers/net/ethernet/freescale/Kconfig @@ -71,7 +71,6 @@ config FSL_XGMAC_MDIO tristate "Freescale XGMAC MDIO" select PHYLIB depends on OF - select MDIO_DEVRES select OF_MDIO help This driver supports the MDIO bus on the Fman 10G Ethernet MACs, and diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 4948b4906584..23c23cca2620 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -3089,15 +3089,25 @@ static int dpaa_xdp_xmit(struct net_device *net_dev, int n, return nxmit; } -static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +static int dpaa_hwtstamp_get(struct net_device *dev, + struct 
kernel_hwtstamp_config *config) { struct dpaa_priv *priv = netdev_priv(dev); - struct hwtstamp_config config; - if (copy_from_user(&config, rq->ifr_data, sizeof(config))) - return -EFAULT; + config->tx_type = priv->tx_tstamp ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; + config->rx_filter = priv->rx_tstamp ? HWTSTAMP_FILTER_ALL : + HWTSTAMP_FILTER_NONE; - switch (config.tx_type) { + return 0; +} + +static int dpaa_hwtstamp_set(struct net_device *dev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) +{ + struct dpaa_priv *priv = netdev_priv(dev); + + switch (config->tx_type) { case HWTSTAMP_TX_OFF: /* Couldn't disable rx/tx timestamping separately. * Do nothing here. @@ -3112,7 +3122,7 @@ static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) return -ERANGE; } - if (config.rx_filter == HWTSTAMP_FILTER_NONE) { + if (config->rx_filter == HWTSTAMP_FILTER_NONE) { /* Couldn't disable rx/tx timestamping separately. * Do nothing here. */ @@ -3121,28 +3131,17 @@ static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) priv->mac_dev->set_tstamp(priv->mac_dev->fman_mac, true); priv->rx_tstamp = true; /* TS is set for all frame types, not only those requested */ - config.rx_filter = HWTSTAMP_FILTER_ALL; + config->rx_filter = HWTSTAMP_FILTER_ALL; } - return copy_to_user(rq->ifr_data, &config, sizeof(config)) ? - -EFAULT : 0; + return 0; } static int dpaa_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd) { - int ret = -EINVAL; struct dpaa_priv *priv = netdev_priv(net_dev); - if (cmd == SIOCGMIIREG) { - if (net_dev->phydev) - return phylink_mii_ioctl(priv->mac_dev->phylink, rq, - cmd); - } - - if (cmd == SIOCSHWTSTAMP) - return dpaa_ts_ioctl(net_dev, rq, cmd); - - return ret; + return phylink_mii_ioctl(priv->mac_dev->phylink, rq, cmd); } static const struct net_device_ops dpaa_ops = { @@ -3160,6 +3159,8 @@ static const struct net_device_ops dpaa_ops = { .ndo_change_mtu = dpaa_change_mtu, .ndo_bpf = dpaa_xdp, .ndo_xdp_xmit = dpaa_xdp_xmit, + .ndo_hwtstamp_get = dpaa_hwtstamp_get, + .ndo_hwtstamp_set = dpaa_hwtstamp_set, }; static int dpaa_napi_add(struct net_device *net_dev) diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index 29886a8ba73f..2ec2c3dab250 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -2585,40 +2585,52 @@ static int dpaa2_eth_set_features(struct net_device *net_dev, return 0; } -static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +static int dpaa2_eth_hwtstamp_set(struct net_device *dev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) { struct dpaa2_eth_priv *priv = netdev_priv(dev); - struct hwtstamp_config config; if (!dpaa2_ptp) return -EINVAL; - if (copy_from_user(&config, rq->ifr_data, sizeof(config))) - return -EFAULT; - - switch (config.tx_type) { + switch (config->tx_type) { case HWTSTAMP_TX_OFF: case HWTSTAMP_TX_ON: case HWTSTAMP_TX_ONESTEP_SYNC: - priv->tx_tstamp_type = config.tx_type; + priv->tx_tstamp_type = config->tx_type; break; default: return -ERANGE; } - if (config.rx_filter == HWTSTAMP_FILTER_NONE) { + if (config->rx_filter == HWTSTAMP_FILTER_NONE) { priv->rx_tstamp = false; } else { priv->rx_tstamp = true; /* TS is set for all frame types, not only those requested */ - config.rx_filter = HWTSTAMP_FILTER_ALL; + config->rx_filter = HWTSTAMP_FILTER_ALL; } if (priv->tx_tstamp_type == HWTSTAMP_TX_ONESTEP_SYNC) 
dpaa2_ptp_onestep_reg_update_method(priv); - return copy_to_user(rq->ifr_data, &config, sizeof(config)) ? - -EFAULT : 0; + return 0; +} + +static int dpaa2_eth_hwtstamp_get(struct net_device *dev, + struct kernel_hwtstamp_config *config) +{ + struct dpaa2_eth_priv *priv = netdev_priv(dev); + + if (!dpaa2_ptp) + return -EINVAL; + + config->tx_type = priv->tx_tstamp_type; + config->rx_filter = priv->rx_tstamp ? HWTSTAMP_FILTER_ALL : + HWTSTAMP_FILTER_NONE; + + return 0; } static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) @@ -2626,9 +2638,6 @@ static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) struct dpaa2_eth_priv *priv = netdev_priv(dev); int err; - if (cmd == SIOCSHWTSTAMP) - return dpaa2_eth_ts_ioctl(dev, rq, cmd); - mutex_lock(&priv->mac_lock); if (dpaa2_eth_is_type_phy(priv)) { @@ -3034,7 +3043,9 @@ static const struct net_device_ops dpaa2_eth_ops = { .ndo_xsk_wakeup = dpaa2_xsk_wakeup, .ndo_setup_tc = dpaa2_eth_setup_tc, .ndo_vlan_rx_add_vid = dpaa2_eth_rx_add_vid, - .ndo_vlan_rx_kill_vid = dpaa2_eth_rx_kill_vid + .ndo_vlan_rx_kill_vid = dpaa2_eth_rx_kill_vid, + .ndo_hwtstamp_get = dpaa2_eth_hwtstamp_get, + .ndo_hwtstamp_set = dpaa2_eth_hwtstamp_set, }; static void dpaa2_eth_cdan_cb(struct dpaa2_io_notification_ctx *ctx) diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig index 6c2779047dcd..e917132d3714 100644 --- a/drivers/net/ethernet/freescale/enetc/Kconfig +++ b/drivers/net/ethernet/freescale/enetc/Kconfig @@ -15,10 +15,16 @@ config NXP_ENETC_PF_COMMON If compiled as module (M), the module name is nxp-enetc-pf-common. +config NXP_NETC_LIB + tristate + help + This module provides common functionalities for both ENETC and NETC + Switch, such as NETC Table Management Protocol (NTMP) 2.0, common tc + flower and debugfs interfaces and so on. + config FSL_ENETC tristate "ENETC PF driver" depends on PCI_MSI - select MDIO_DEVRES select FSL_ENETC_CORE select FSL_ENETC_IERB select FSL_ENETC_MDIO @@ -36,10 +42,10 @@ config FSL_ENETC config NXP_ENETC4 tristate "ENETC4 PF driver" depends on PCI_MSI - select MDIO_DEVRES select FSL_ENETC_CORE select FSL_ENETC_MDIO select NXP_ENETC_PF_COMMON + select NXP_NETC_LIB select PHYLINK select DIMLIB help @@ -73,7 +79,7 @@ config FSL_ENETC_IERB config FSL_ENETC_MDIO tristate "ENETC MDIO driver" - depends on PCI && MDIO_DEVRES && MDIO_BUS + depends on PCI && PHYLIB help This driver supports NXP ENETC Central MDIO controller as a PCIe physical function (PF) device. 
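Editor's note: the dpaa and dpaa2 hunks above (and the enetc ones further
down) all move hardware timestamping control from the SIOCSHWTSTAMP /
SIOCGHWTSTAMP ioctl path to the dedicated ndo_hwtstamp_get()/ndo_hwtstamp_set()
callbacks, which receive an already-copied struct kernel_hwtstamp_config
instead of a raw struct ifreq, so the copy_from_user()/copy_to_user()
boilerplate disappears.  A minimal sketch of the resulting shape for a
hypothetical driver (the foo_* names and priv fields are assumptions, not
taken from any driver in this diff):

#include <linux/net_tstamp.h>
#include <linux/netdevice.h>

struct foo_priv {
	bool tx_tstamp;
	bool rx_tstamp;
};

static int foo_hwtstamp_get(struct net_device *ndev,
			    struct kernel_hwtstamp_config *config)
{
	struct foo_priv *priv = netdev_priv(ndev);

	config->tx_type = priv->tx_tstamp ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	config->rx_filter = priv->rx_tstamp ? HWTSTAMP_FILTER_ALL :
					      HWTSTAMP_FILTER_NONE;
	return 0;
}

static int foo_hwtstamp_set(struct net_device *ndev,
			    struct kernel_hwtstamp_config *config,
			    struct netlink_ext_ack *extack)
{
	struct foo_priv *priv = netdev_priv(ndev);

	switch (config->tx_type) {
	case HWTSTAMP_TX_OFF:
		priv->tx_tstamp = false;
		break;
	case HWTSTAMP_TX_ON:
		priv->tx_tstamp = true;
		break;
	default:
		return -ERANGE;
	}

	if (config->rx_filter == HWTSTAMP_FILTER_NONE) {
		priv->rx_tstamp = false;
	} else {
		/* hardware stamps every RX frame, report that back */
		priv->rx_tstamp = true;
		config->rx_filter = HWTSTAMP_FILTER_ALL;
	}

	return 0;
}

The callbacks are then wired into the driver's net_device_ops
(.ndo_hwtstamp_get / .ndo_hwtstamp_set) instead of being dispatched by hand
from .ndo_eth_ioctl, exactly as the dpaa_ops and dpaa2_eth_ops hunks above do.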
diff --git a/drivers/net/ethernet/freescale/enetc/Makefile b/drivers/net/ethernet/freescale/enetc/Makefile index 6fd27ee4fcd1..f1c5ad45fd76 100644 --- a/drivers/net/ethernet/freescale/enetc/Makefile +++ b/drivers/net/ethernet/freescale/enetc/Makefile @@ -6,6 +6,9 @@ fsl-enetc-core-y := enetc.o enetc_cbdr.o enetc_ethtool.o obj-$(CONFIG_NXP_ENETC_PF_COMMON) += nxp-enetc-pf-common.o nxp-enetc-pf-common-y := enetc_pf_common.o +obj-$(CONFIG_NXP_NETC_LIB) += nxp-netc-lib.o +nxp-netc-lib-y := ntmp.o + obj-$(CONFIG_FSL_ENETC) += fsl-enetc.o fsl-enetc-y := enetc_pf.o fsl-enetc-$(CONFIG_PCI_IOV) += enetc_msg.o @@ -13,6 +16,7 @@ fsl-enetc-$(CONFIG_FSL_ENETC_QOS) += enetc_qos.o obj-$(CONFIG_NXP_ENETC4) += nxp-enetc4.o nxp-enetc4-y := enetc4_pf.o +nxp-enetc4-$(CONFIG_DEBUG_FS) += enetc4_debugfs.o obj-$(CONFIG_FSL_ENETC_VF) += fsl-enetc-vf.o fsl-enetc-vf-y := enetc_vf.o diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index 3ee52f4b1166..dcc3fbac3481 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -36,6 +36,42 @@ static void enetc_change_preemptible_tcs(struct enetc_ndev_priv *priv, enetc_mm_commit_preemptible_tcs(priv); } +static int enetc_mac_addr_hash_idx(const u8 *addr) +{ + u64 fold = __swab64(ether_addr_to_u64(addr)) >> 16; + u64 mask = 0; + int res = 0; + int i; + + for (i = 0; i < 8; i++) + mask |= BIT_ULL(i * 6); + + for (i = 0; i < 6; i++) + res |= (hweight64(fold & (mask << i)) & 0x1) << i; + + return res; +} + +void enetc_add_mac_addr_ht_filter(struct enetc_mac_filter *filter, + const unsigned char *addr) +{ + int idx = enetc_mac_addr_hash_idx(addr); + + /* add hash table entry */ + __set_bit(idx, filter->mac_hash_table); + filter->mac_addr_cnt++; +} +EXPORT_SYMBOL_GPL(enetc_add_mac_addr_ht_filter); + +void enetc_reset_mac_addr_filter(struct enetc_mac_filter *filter) +{ + filter->mac_addr_cnt = 0; + + bitmap_zero(filter->mac_hash_table, + ENETC_MADDR_HASH_TBL_SZ); +} +EXPORT_SYMBOL_GPL(enetc_reset_mac_addr_filter); + static int enetc_num_stack_tx_queues(struct enetc_ndev_priv *priv) { int num_tx_rings = priv->num_tx_rings; @@ -2379,7 +2415,7 @@ static int enetc_setup_default_rss_table(struct enetc_si *si, int num_groups) for (i = 0; i < si->num_rss; i++) rss_table[i] = i % num_groups; - enetc_set_rss_table(si, rss_table, si->num_rss); + si->ops->set_rss_table(si, rss_table, si->num_rss); kfree(rss_table); @@ -2394,6 +2430,20 @@ static void enetc_set_lso_flags_mask(struct enetc_hw *hw) enetc_wr(hw, ENETC4_SILSOSFMR1, 0); } +static void enetc_set_rss(struct net_device *ndev, int en) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_hw *hw = &priv->si->hw; + u32 reg; + + enetc_wr(hw, ENETC_SIRBGCR, priv->num_rx_rings); + + reg = enetc_rd(hw, ENETC_SIMR); + reg &= ~ENETC_SIMR_RSSE; + reg |= (en) ? 
ENETC_SIMR_RSSE : 0; + enetc_wr(hw, ENETC_SIMR, reg); +} + int enetc_configure_si(struct enetc_ndev_priv *priv) { struct enetc_si *si = priv->si; @@ -2410,13 +2460,13 @@ int enetc_configure_si(struct enetc_ndev_priv *priv) if (si->hw_features & ENETC_SI_F_LSO) enetc_set_lso_flags_mask(hw); - /* TODO: RSS support for i.MX95 will be supported later, and the - * is_enetc_rev1() condition will be removed - */ - if (si->num_rss && is_enetc_rev1(si)) { + if (si->num_rss) { err = enetc_setup_default_rss_table(si, priv->num_rx_rings); if (err) return err; + + if (priv->ndev->features & NETIF_F_RXHASH) + enetc_set_rss(priv->ndev, true); } return 0; @@ -3209,22 +3259,6 @@ struct net_device_stats *enetc_get_stats(struct net_device *ndev) } EXPORT_SYMBOL_GPL(enetc_get_stats); -static int enetc_set_rss(struct net_device *ndev, int en) -{ - struct enetc_ndev_priv *priv = netdev_priv(ndev); - struct enetc_hw *hw = &priv->si->hw; - u32 reg; - - enetc_wr(hw, ENETC_SIRBGCR, priv->num_rx_rings); - - reg = enetc_rd(hw, ENETC_SIMR); - reg &= ~ENETC_SIMR_RSSE; - reg |= (en) ? ENETC_SIMR_RSSE : 0; - enetc_wr(hw, ENETC_SIMR, reg); - - return 0; -} - static void enetc_enable_rxvlan(struct net_device *ndev, bool en) { struct enetc_ndev_priv *priv = netdev_priv(ndev); @@ -3262,16 +3296,17 @@ void enetc_set_features(struct net_device *ndev, netdev_features_t features) } EXPORT_SYMBOL_GPL(enetc_set_features); -static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr) +int enetc_hwtstamp_set(struct net_device *ndev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) { struct enetc_ndev_priv *priv = netdev_priv(ndev); int err, new_offloads = priv->active_offloads; - struct hwtstamp_config config; - if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) - return -EFAULT; + if (!IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK)) + return -EOPNOTSUPP; - switch (config.tx_type) { + switch (config->tx_type) { case HWTSTAMP_TX_OFF: new_offloads &= ~ENETC_F_TX_TSTAMP_MASK; break; @@ -3290,13 +3325,13 @@ static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr) return -ERANGE; } - switch (config.rx_filter) { + switch (config->rx_filter) { case HWTSTAMP_FILTER_NONE: new_offloads &= ~ENETC_F_RX_TSTAMP; break; default: new_offloads |= ENETC_F_RX_TSTAMP; - config.rx_filter = HWTSTAMP_FILTER_ALL; + config->rx_filter = HWTSTAMP_FILTER_ALL; } if ((new_offloads ^ priv->active_offloads) & ENETC_F_RX_TSTAMP) { @@ -3309,42 +3344,36 @@ static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr) priv->active_offloads = new_offloads; - return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? - -EFAULT : 0; + return 0; } +EXPORT_SYMBOL_GPL(enetc_hwtstamp_set); -static int enetc_hwtstamp_get(struct net_device *ndev, struct ifreq *ifr) +int enetc_hwtstamp_get(struct net_device *ndev, + struct kernel_hwtstamp_config *config) { struct enetc_ndev_priv *priv = netdev_priv(ndev); - struct hwtstamp_config config; - config.flags = 0; + if (!IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK)) + return -EOPNOTSUPP; if (priv->active_offloads & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) - config.tx_type = HWTSTAMP_TX_ONESTEP_SYNC; + config->tx_type = HWTSTAMP_TX_ONESTEP_SYNC; else if (priv->active_offloads & ENETC_F_TX_TSTAMP) - config.tx_type = HWTSTAMP_TX_ON; + config->tx_type = HWTSTAMP_TX_ON; else - config.tx_type = HWTSTAMP_TX_OFF; + config->tx_type = HWTSTAMP_TX_OFF; - config.rx_filter = (priv->active_offloads & ENETC_F_RX_TSTAMP) ? 
- HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE; + config->rx_filter = (priv->active_offloads & ENETC_F_RX_TSTAMP) ? + HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE; - return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? - -EFAULT : 0; + return 0; } +EXPORT_SYMBOL_GPL(enetc_hwtstamp_get); int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) { struct enetc_ndev_priv *priv = netdev_priv(ndev); - if (IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK)) { - if (cmd == SIOCSHWTSTAMP) - return enetc_hwtstamp_set(ndev, rq); - if (cmd == SIOCGHWTSTAMP) - return enetc_hwtstamp_get(ndev, rq); - } - if (!priv->phylink) return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h index 4ad4eb5c5a74..872d2cbd088b 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.h +++ b/drivers/net/ethernet/freescale/enetc/enetc.h @@ -8,6 +8,7 @@ #include <linux/dma-mapping.h> #include <linux/skbuff.h> #include <linux/ethtool.h> +#include <linux/fsl/ntmp.h> #include <linux/if_vlan.h> #include <linux/phylink.h> #include <linux/dim.h> @@ -22,6 +23,18 @@ #define ENETC_CBD_DATA_MEM_ALIGN 64 +#define ENETC_MADDR_HASH_TBL_SZ 64 + +enum enetc_mac_addr_type {UC, MC, MADDR_TYPE}; + +struct enetc_mac_filter { + union { + char mac_addr[ETH_ALEN]; + DECLARE_BITMAP(mac_hash_table, ENETC_MADDR_HASH_TBL_SZ); + }; + int mac_addr_cnt; +}; + struct enetc_tx_swbd { union { struct sk_buff *skb; @@ -266,6 +279,19 @@ struct enetc_platform_info { const struct enetc_drvdata *data; }; +struct enetc_si; + +/* + * This structure defines some common hooks for the ENETC PSI and VSI. + * In addition, since the VSI only uses struct enetc_si as its private + * driver data, this structure also defines some hooks specifically + * for the VSI. VSI-specific hooks use the 'vf_*()' naming format. + */ +struct enetc_si_ops { + int (*get_rss_table)(struct enetc_si *si, u32 *table, int count); + int (*set_rss_table)(struct enetc_si *si, const u32 *table, int count); }; + /* PCI IEP device data */ struct enetc_si { struct pci_dev *pdev; @@ -274,7 +300,10 @@ struct enetc_si { struct net_device *ndev; /* back ref. 
*/ - struct enetc_cbdr cbd_ring; + union { + struct enetc_cbdr cbd_ring; /* Only ENETC 1.0 */ + struct ntmp_user ntmp_user; /* ENETC 4.1 and later */ + }; int num_rx_rings; /* how many rings are available in the SI */ int num_tx_rings; @@ -284,6 +313,11 @@ struct enetc_si { u16 revision; int hw_features; const struct enetc_drvdata *drvdata; + const struct enetc_si_ops *ops; + + struct workqueue_struct *workqueue; + struct work_struct rx_mode_task; + struct dentry *debugfs_root; }; #define ENETC_SI_ALIGN 32 @@ -466,6 +500,9 @@ int enetc_alloc_si_resources(struct enetc_ndev_priv *priv); void enetc_free_si_resources(struct enetc_ndev_priv *priv); int enetc_configure_si(struct enetc_ndev_priv *priv); int enetc_get_driver_data(struct enetc_si *si); +void enetc_add_mac_addr_ht_filter(struct enetc_mac_filter *filter, + const unsigned char *addr); +void enetc_reset_mac_addr_filter(struct enetc_mac_filter *filter); int enetc_open(struct net_device *ndev); int enetc_close(struct net_device *ndev); @@ -481,6 +518,12 @@ int enetc_setup_bpf(struct net_device *ndev, struct netdev_bpf *bpf); int enetc_xdp_xmit(struct net_device *ndev, int num_frames, struct xdp_frame **frames, u32 flags); +int enetc_hwtstamp_get(struct net_device *ndev, + struct kernel_hwtstamp_config *config); +int enetc_hwtstamp_set(struct net_device *ndev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack); + /* ethtool */ extern const struct ethtool_ops enetc_pf_ethtool_ops; extern const struct ethtool_ops enetc4_pf_ethtool_ops; @@ -493,15 +536,19 @@ void enetc_mm_commit_preemptible_tcs(struct enetc_ndev_priv *priv); int enetc_setup_cbdr(struct device *dev, struct enetc_hw *hw, int bd_count, struct enetc_cbdr *cbdr); void enetc_teardown_cbdr(struct enetc_cbdr *cbdr); +int enetc4_setup_cbdr(struct enetc_si *si); +void enetc4_teardown_cbdr(struct enetc_si *si); int enetc_set_mac_flt_entry(struct enetc_si *si, int index, char *mac_addr, int si_map); int enetc_clear_mac_flt_entry(struct enetc_si *si, int index); int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse, int index); -void enetc_set_rss_key(struct enetc_hw *hw, const u8 *bytes); +void enetc_set_rss_key(struct enetc_si *si, const u8 *bytes); int enetc_get_rss_table(struct enetc_si *si, u32 *table, int count); int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count); int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd); +int enetc4_get_rss_table(struct enetc_si *si, u32 *table, int count); +int enetc4_set_rss_table(struct enetc_si *si, const u32 *table, int count); static inline void *enetc_cbd_alloc_data_mem(struct enetc_si *si, struct enetc_cbd *cbd, diff --git a/drivers/net/ethernet/freescale/enetc/enetc4_debugfs.c b/drivers/net/ethernet/freescale/enetc/enetc4_debugfs.c new file mode 100644 index 000000000000..1b1591dce73d --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc4_debugfs.c @@ -0,0 +1,90 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright 2025 NXP */ + +#include <linux/device.h> +#include <linux/debugfs.h> +#include <linux/seq_file.h> +#include <linux/string_choices.h> + +#include "enetc_pf.h" +#include "enetc4_debugfs.h" + +static void enetc_show_si_mac_hash_filter(struct seq_file *s, int i) +{ + struct enetc_si *si = s->private; + struct enetc_hw *hw = &si->hw; + u32 hash_h, hash_l; + + hash_l = enetc_port_rd(hw, ENETC4_PSIUMHFR0(i)); + hash_h = enetc_port_rd(hw, ENETC4_PSIUMHFR1(i)); + seq_printf(s, "SI %d unicast MAC hash filter: 0x%08x%08x\n", + i, hash_h, hash_l); + + hash_l = 
enetc_port_rd(hw, ENETC4_PSIMMHFR0(i)); + hash_h = enetc_port_rd(hw, ENETC4_PSIMMHFR1(i)); + seq_printf(s, "SI %d multicast MAC hash filter: 0x%08x%08x\n", + i, hash_h, hash_l); +} + +static int enetc_mac_filter_show(struct seq_file *s, void *data) +{ + struct enetc_si *si = s->private; + struct enetc_hw *hw = &si->hw; + struct maft_entry_data maft; + struct enetc_pf *pf; + int i, err, num_si; + u32 val; + + pf = enetc_si_priv(si); + num_si = pf->caps.num_vsi + 1; + + val = enetc_port_rd(hw, ENETC4_PSIPMMR); + for (i = 0; i < num_si; i++) { + seq_printf(s, "SI %d Unicast Promiscuous mode: %s\n", i, + str_enabled_disabled(PSIPMMR_SI_MAC_UP(i) & val)); + seq_printf(s, "SI %d Multicast Promiscuous mode: %s\n", i, + str_enabled_disabled(PSIPMMR_SI_MAC_MP(i) & val)); + } + + /* MAC hash filter table */ + for (i = 0; i < num_si; i++) + enetc_show_si_mac_hash_filter(s, i); + + if (!pf->num_mfe) + return 0; + + /* MAC address filter table */ + seq_puts(s, "MAC address filter table\n"); + for (i = 0; i < pf->num_mfe; i++) { + memset(&maft, 0, sizeof(maft)); + err = ntmp_maft_query_entry(&si->ntmp_user, i, &maft); + if (err) + return err; + + seq_printf(s, "Entry %d, MAC: %pM, SI bitmap: 0x%04x\n", i, + maft.keye.mac_addr, le16_to_cpu(maft.cfge.si_bitmap)); + } + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(enetc_mac_filter); + +void enetc_create_debugfs(struct enetc_si *si) +{ + struct net_device *ndev = si->ndev; + struct dentry *root; + + root = debugfs_create_dir(netdev_name(ndev), NULL); + if (IS_ERR(root)) + return; + + si->debugfs_root = root; + + debugfs_create_file("mac_filter", 0444, root, si, &enetc_mac_filter_fops); +} + +void enetc_remove_debugfs(struct enetc_si *si) +{ + debugfs_remove(si->debugfs_root); + si->debugfs_root = NULL; +} diff --git a/drivers/net/ethernet/freescale/enetc/enetc4_debugfs.h b/drivers/net/ethernet/freescale/enetc/enetc4_debugfs.h new file mode 100644 index 000000000000..96caca35f79d --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc4_debugfs.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* Copyright 2025 NXP */ + +#ifndef __ENETC4_DEBUGFS_H +#define __ENETC4_DEBUGFS_H + +#if IS_ENABLED(CONFIG_DEBUG_FS) +void enetc_create_debugfs(struct enetc_si *si); +void enetc_remove_debugfs(struct enetc_si *si); +#else +static inline void enetc_create_debugfs(struct enetc_si *si) +{ +} + +static inline void enetc_remove_debugfs(struct enetc_si *si) +{ +} +#endif + +#endif diff --git a/drivers/net/ethernet/freescale/enetc/enetc4_hw.h b/drivers/net/ethernet/freescale/enetc/enetc4_hw.h index 695cb07c74bc..aa25b445d301 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc4_hw.h +++ b/drivers/net/ethernet/freescale/enetc/enetc4_hw.h @@ -99,6 +99,18 @@ #define ENETC4_PSICFGR2(a) ((a) * 0x80 + 0x2018) #define PSICFGR2_NUM_MSIX GENMASK(5, 0) +/* Port station interface a unicast MAC hash filter register 0/1 */ +#define ENETC4_PSIUMHFR0(a) ((a) * 0x80 + 0x2050) +#define ENETC4_PSIUMHFR1(a) ((a) * 0x80 + 0x2054) + +/* Port station interface a multicast MAC hash filter register 0/1 */ +#define ENETC4_PSIMMHFR0(a) ((a) * 0x80 + 0x2058) +#define ENETC4_PSIMMHFR1(a) ((a) * 0x80 + 0x205c) + +/* Port station interface a VLAN hash filter register 0/1 */ +#define ENETC4_PSIVHFR0(a) ((a) * 0x80 + 0x2060) +#define ENETC4_PSIVHFR1(a) ((a) * 0x80 + 0x2064) + #define ENETC4_PMCAPR 0x4004 #define PMCAPR_HD BIT(8) #define PMCAPR_FP GENMASK(10, 9) diff --git a/drivers/net/ethernet/freescale/enetc/enetc4_pf.c b/drivers/net/ethernet/freescale/enetc/enetc4_pf.c 
index 73ac8c6afb3a..b3dc1afeefd1 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc4_pf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc4_pf.c @@ -8,9 +8,19 @@ #include <linux/unaligned.h> #include "enetc_pf_common.h" +#include "enetc4_debugfs.h" #define ENETC_SI_MAX_RING_NUM 8 +#define ENETC_MAC_FILTER_TYPE_UC BIT(0) +#define ENETC_MAC_FILTER_TYPE_MC BIT(1) +#define ENETC_MAC_FILTER_TYPE_ALL (ENETC_MAC_FILTER_TYPE_UC | \ + ENETC_MAC_FILTER_TYPE_MC) + +struct enetc_mac_addr { + u8 addr[ETH_ALEN]; +}; + static void enetc4_get_port_caps(struct enetc_pf *pf) { struct enetc_hw *hw = &pf->si->hw; @@ -26,6 +36,9 @@ static void enetc4_get_port_caps(struct enetc_pf *pf) val = enetc_port_rd(hw, ENETC4_PMCAPR); pf->caps.half_duplex = (val & PMCAPR_HD) ? 1 : 0; + + val = enetc_port_rd(hw, ENETC4_PSIMAFCAPR); + pf->caps.mac_filter_num = val & PSIMAFCAPR_NUM_MAC_AFTE; } static void enetc4_pf_set_si_primary_mac(struct enetc_hw *hw, int si, @@ -56,6 +69,200 @@ static void enetc4_pf_get_si_primary_mac(struct enetc_hw *hw, int si, put_unaligned_le16(lower, addr + 4); } +static void enetc4_pf_set_si_mac_promisc(struct enetc_hw *hw, int si, + bool uc_promisc, bool mc_promisc) +{ + u32 val = enetc_port_rd(hw, ENETC4_PSIPMMR); + + if (uc_promisc) + val |= PSIPMMR_SI_MAC_UP(si); + else + val &= ~PSIPMMR_SI_MAC_UP(si); + + if (mc_promisc) + val |= PSIPMMR_SI_MAC_MP(si); + else + val &= ~PSIPMMR_SI_MAC_MP(si); + + enetc_port_wr(hw, ENETC4_PSIPMMR, val); +} + +static void enetc4_pf_set_si_uc_hash_filter(struct enetc_hw *hw, int si, + u64 hash) +{ + enetc_port_wr(hw, ENETC4_PSIUMHFR0(si), lower_32_bits(hash)); + enetc_port_wr(hw, ENETC4_PSIUMHFR1(si), upper_32_bits(hash)); +} + +static void enetc4_pf_set_si_mc_hash_filter(struct enetc_hw *hw, int si, + u64 hash) +{ + enetc_port_wr(hw, ENETC4_PSIMMHFR0(si), lower_32_bits(hash)); + enetc_port_wr(hw, ENETC4_PSIMMHFR1(si), upper_32_bits(hash)); +} + +static void enetc4_pf_set_loopback(struct net_device *ndev, bool en) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_si *si = priv->si; + u32 val; + + val = enetc_port_mac_rd(si, ENETC4_PM_CMD_CFG(0)); + val = u32_replace_bits(val, en ? 1 : 0, PM_CMD_CFG_LOOP_EN); + /* Default to select MAC level loopback mode if loopback is enabled. */ + val = u32_replace_bits(val, en ? 
LPBCK_MODE_MAC_LEVEL : 0, + PM_CMD_CFG_LPBK_MODE); + + enetc_port_mac_wr(si, ENETC4_PM_CMD_CFG(0), val); +} + +static void enetc4_pf_clear_maft_entries(struct enetc_pf *pf) +{ + int i; + + for (i = 0; i < pf->num_mfe; i++) + ntmp_maft_delete_entry(&pf->si->ntmp_user, i); + + pf->num_mfe = 0; +} + +static int enetc4_pf_add_maft_entries(struct enetc_pf *pf, + struct enetc_mac_addr *mac, + int mac_cnt) +{ + struct maft_entry_data maft = {}; + u16 si_bit = BIT(0); + int i, err; + + maft.cfge.si_bitmap = cpu_to_le16(si_bit); + for (i = 0; i < mac_cnt; i++) { + ether_addr_copy(maft.keye.mac_addr, mac[i].addr); + err = ntmp_maft_add_entry(&pf->si->ntmp_user, i, &maft); + if (unlikely(err)) { + pf->num_mfe = i; + goto clear_maft_entries; + } + } + + pf->num_mfe = mac_cnt; + + return 0; + +clear_maft_entries: + enetc4_pf_clear_maft_entries(pf); + + return err; +} + +static int enetc4_pf_set_uc_exact_filter(struct enetc_pf *pf) +{ + int max_num_mfe = pf->caps.mac_filter_num; + struct enetc_mac_filter mac_filter = {}; + struct net_device *ndev = pf->si->ndev; + struct enetc_hw *hw = &pf->si->hw; + struct enetc_mac_addr *mac_tbl; + struct netdev_hw_addr *ha; + int i = 0, err; + int mac_cnt; + + netif_addr_lock_bh(ndev); + + mac_cnt = netdev_uc_count(ndev); + if (!mac_cnt) { + netif_addr_unlock_bh(ndev); + /* clear both MAC hash and exact filters */ + enetc4_pf_set_si_uc_hash_filter(hw, 0, 0); + enetc4_pf_clear_maft_entries(pf); + + return 0; + } + + if (mac_cnt > max_num_mfe) { + err = -ENOSPC; + goto unlock_netif_addr; + } + + mac_tbl = kcalloc(mac_cnt, sizeof(*mac_tbl), GFP_ATOMIC); + if (!mac_tbl) { + err = -ENOMEM; + goto unlock_netif_addr; + } + + netdev_for_each_uc_addr(ha, ndev) { + enetc_add_mac_addr_ht_filter(&mac_filter, ha->addr); + ether_addr_copy(mac_tbl[i++].addr, ha->addr); + } + + netif_addr_unlock_bh(ndev); + + /* Set temporary unicast hash filters in case of Rx loss when + * updating MAC address filter table + */ + enetc4_pf_set_si_uc_hash_filter(hw, 0, *mac_filter.mac_hash_table); + enetc4_pf_clear_maft_entries(pf); + + if (!enetc4_pf_add_maft_entries(pf, mac_tbl, i)) + enetc4_pf_set_si_uc_hash_filter(hw, 0, 0); + + kfree(mac_tbl); + + return 0; + +unlock_netif_addr: + netif_addr_unlock_bh(ndev); + + return err; +} + +static void enetc4_pf_set_mac_hash_filter(struct enetc_pf *pf, int type) +{ + struct net_device *ndev = pf->si->ndev; + struct enetc_mac_filter *mac_filter; + struct enetc_hw *hw = &pf->si->hw; + struct netdev_hw_addr *ha; + + netif_addr_lock_bh(ndev); + if (type & ENETC_MAC_FILTER_TYPE_UC) { + mac_filter = &pf->mac_filter[UC]; + enetc_reset_mac_addr_filter(mac_filter); + netdev_for_each_uc_addr(ha, ndev) + enetc_add_mac_addr_ht_filter(mac_filter, ha->addr); + + enetc4_pf_set_si_uc_hash_filter(hw, 0, + *mac_filter->mac_hash_table); + } + + if (type & ENETC_MAC_FILTER_TYPE_MC) { + mac_filter = &pf->mac_filter[MC]; + enetc_reset_mac_addr_filter(mac_filter); + netdev_for_each_mc_addr(ha, ndev) + enetc_add_mac_addr_ht_filter(mac_filter, ha->addr); + + enetc4_pf_set_si_mc_hash_filter(hw, 0, + *mac_filter->mac_hash_table); + } + netif_addr_unlock_bh(ndev); +} + +static void enetc4_pf_set_mac_filter(struct enetc_pf *pf, int type) +{ + /* Currently, the MAC address filter table (MAFT) only has 4 entries, + * and multiple multicast addresses for filtering will be configured + * in the default network configuration, so MAFT is only suitable for + * unicast filtering. If the number of unicast addresses exceeds the + * table capacity, the MAC hash filter will be used. 
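(Illustrative aside, not part of the patch: the 64-bit hash filter mentioned here is indexed by the 6-bit fold computed in enetc_mac_addr_hash_idx(), which this series moves into enetc.c. Below is a minimal user-space sketch of that fold; the fold algorithm and the PSIUMHFR0/1 register split come from this diff, while the file name and the example MAC address are arbitrary.)

/* mac_hash_idx_demo.c - illustrative only; mirrors enetc_mac_addr_hash_idx()
 * from this patch set. Build: gcc -o mac_hash_idx_demo mac_hash_idx_demo.c
 */
#include <stdint.h>
#include <stdio.h>

static int mac_hash_idx(const uint8_t *addr)
{
	uint64_t fold = 0;
	int idx = 0, bit, k;

	/* pack the six address bytes with addr[0] in the low byte; this is
	 * what __swab64(ether_addr_to_u64(addr)) >> 16 yields in the driver
	 */
	for (k = 0; k < 6; k++)
		fold |= (uint64_t)addr[k] << (8 * k);

	/* XOR-fold the 48-bit value down to a 6-bit hash table index */
	for (bit = 0; bit < 6; bit++) {
		int parity = 0;

		for (k = bit; k < 48; k += 6)
			parity ^= (int)((fold >> k) & 1);
		idx |= parity << bit;
	}

	return idx;
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x04, 0x9f, 0x05, 0x01, 0x02 };
	int idx = mac_hash_idx(mac);

	/* bits 0..31 of the hash go to PSIUMHFR0, bits 32..63 to PSIUMHFR1 */
	printf("%02x:%02x:%02x:%02x:%02x:%02x -> hash idx %d (%s bit %d)\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], idx,
	       idx < 32 ? "PSIUMHFR0" : "PSIUMHFR1", idx & 31);
	return 0;
}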
+ */ + if (type & ENETC_MAC_FILTER_TYPE_UC && enetc4_pf_set_uc_exact_filter(pf)) { + /* Fall back to the MAC hash filter */ + enetc4_pf_set_mac_hash_filter(pf, ENETC_MAC_FILTER_TYPE_UC); + /* Clear the old MAC exact filter */ + enetc4_pf_clear_maft_entries(pf); + } + + if (type & ENETC_MAC_FILTER_TYPE_MC) + enetc4_pf_set_mac_hash_filter(pf, ENETC_MAC_FILTER_TYPE_MC); +} + static const struct enetc_pf_ops enetc4_pf_ops = { .set_si_primary_mac = enetc4_pf_set_si_primary_mac, .get_si_primary_mac = enetc4_pf_get_si_primary_mac, @@ -226,24 +433,6 @@ static void enetc4_set_trx_frame_size(struct enetc_pf *pf) enetc4_pf_reset_tc_msdu(&si->hw); } -static void enetc4_set_rss_key(struct enetc_hw *hw, const u8 *bytes) -{ - int i; - - for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / 4; i++) - enetc_port_wr(hw, ENETC4_PRSSKR(i), ((u32 *)bytes)[i]); -} - -static void enetc4_set_default_rss_key(struct enetc_pf *pf) -{ - u8 hash_key[ENETC_RSSHASH_KEY_SIZE] = {0}; - struct enetc_hw *hw = &pf->si->hw; - - /* set up hash key */ - get_random_bytes(hash_key, ENETC_RSSHASH_KEY_SIZE); - enetc4_set_rss_key(hw, hash_key); -} - static void enetc4_enable_trx(struct enetc_pf *pf) { struct enetc_hw *hw = &pf->si->hw; @@ -256,10 +445,25 @@ static void enetc4_configure_port(struct enetc_pf *pf) { enetc4_configure_port_si(pf); enetc4_set_trx_frame_size(pf); - enetc4_set_default_rss_key(pf); + enetc_set_default_rss_key(pf); enetc4_enable_trx(pf); } +static int enetc4_init_ntmp_user(struct enetc_si *si) +{ + struct ntmp_user *user = &si->ntmp_user; + + /* For ENETC 4.1, all table versions are 0 */ + memset(&user->tbl, 0, sizeof(user->tbl)); + + return enetc4_setup_cbdr(si); +} + +static void enetc4_free_ntmp_user(struct enetc_si *si) +{ + enetc4_teardown_cbdr(si); +} + static int enetc4_pf_init(struct enetc_pf *pf) { struct device *dev = &pf->si->pdev->dev; @@ -272,17 +476,99 @@ static int enetc4_pf_init(struct enetc_pf *pf) return err; } + err = enetc4_init_ntmp_user(pf->si); + if (err) { + dev_err(dev, "Failed to init CBDR\n"); + return err; + } + enetc4_configure_port(pf); return 0; } +static void enetc4_pf_free(struct enetc_pf *pf) +{ + enetc4_free_ntmp_user(pf->si); +} + +static void enetc4_psi_do_set_rx_mode(struct work_struct *work) +{ + struct enetc_si *si = container_of(work, struct enetc_si, rx_mode_task); + struct enetc_pf *pf = enetc_si_priv(si); + struct net_device *ndev = si->ndev; + struct enetc_hw *hw = &si->hw; + bool uc_promisc = false; + bool mc_promisc = false; + int type = 0; + + rtnl_lock(); + + if (ndev->flags & IFF_PROMISC) { + uc_promisc = true; + mc_promisc = true; + } else if (ndev->flags & IFF_ALLMULTI) { + mc_promisc = true; + type = ENETC_MAC_FILTER_TYPE_UC; + } else { + type = ENETC_MAC_FILTER_TYPE_ALL; + } + + enetc4_pf_set_si_mac_promisc(hw, 0, uc_promisc, mc_promisc); + + if (uc_promisc) { + enetc4_pf_set_si_uc_hash_filter(hw, 0, 0); + enetc4_pf_clear_maft_entries(pf); + } + + if (mc_promisc) + enetc4_pf_set_si_mc_hash_filter(hw, 0, 0); + + /* Set new MAC filter */ + enetc4_pf_set_mac_filter(pf, type); + + rtnl_unlock(); +} + +static void enetc4_pf_set_rx_mode(struct net_device *ndev) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_si *si = priv->si; + + queue_work(si->workqueue, &si->rx_mode_task); +} + +static int enetc4_pf_set_features(struct net_device *ndev, + netdev_features_t features) +{ + netdev_features_t changed = ndev->features ^ features; + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_hw *hw = &priv->si->hw; + + if (changed & 
NETIF_F_HW_VLAN_CTAG_FILTER) { + bool promisc_en = !(features & NETIF_F_HW_VLAN_CTAG_FILTER); + + enetc4_pf_set_si_vlan_promisc(hw, 0, promisc_en); + } + + if (changed & NETIF_F_LOOPBACK) + enetc4_pf_set_loopback(ndev, !!(features & NETIF_F_LOOPBACK)); + + enetc_set_features(ndev, features); + + return 0; +} + static const struct net_device_ops enetc4_ndev_ops = { .ndo_open = enetc_open, .ndo_stop = enetc_close, .ndo_start_xmit = enetc_xmit, .ndo_get_stats = enetc_get_stats, .ndo_set_mac_address = enetc_pf_set_mac_addr, + .ndo_set_rx_mode = enetc4_pf_set_rx_mode, + .ndo_set_features = enetc4_pf_set_features, + .ndo_vlan_rx_add_vid = enetc_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = enetc_vlan_rx_del_vid, }; static struct phylink_pcs * @@ -617,6 +903,19 @@ static void enetc4_link_deinit(struct enetc_ndev_priv *priv) enetc_mdiobus_destroy(pf); } +static int enetc4_psi_wq_task_init(struct enetc_si *si) +{ + char wq_name[24]; + + INIT_WORK(&si->rx_mode_task, enetc4_psi_do_set_rx_mode); + snprintf(wq_name, sizeof(wq_name), "enetc-%s", pci_name(si->pdev)); + si->workqueue = create_singlethread_workqueue(wq_name); + if (!si->workqueue) + return -ENOMEM; + + return 0; +} + static int enetc4_pf_netdev_create(struct enetc_si *si) { struct device *dev = &si->pdev->dev; @@ -657,6 +956,12 @@ static int enetc4_pf_netdev_create(struct enetc_si *si) if (err) goto err_link_init; + err = enetc4_psi_wq_task_init(si); + if (err) { + dev_err(dev, "Failed to init workqueue\n"); + goto err_wq_init; + } + err = register_netdev(ndev); if (err) { dev_err(dev, "Failed to register netdev\n"); @@ -666,6 +971,8 @@ static int enetc4_pf_netdev_create(struct enetc_si *si) return 0; err_reg_netdev: + destroy_workqueue(si->workqueue); +err_wq_init: enetc4_link_deinit(priv); err_link_init: enetc_free_msix(priv); @@ -683,11 +990,18 @@ static void enetc4_pf_netdev_destroy(struct enetc_si *si) struct net_device *ndev = si->ndev; unregister_netdev(ndev); + cancel_work(&si->rx_mode_task); + destroy_workqueue(si->workqueue); enetc4_link_deinit(priv); enetc_free_msix(priv); free_netdev(ndev); } +static const struct enetc_si_ops enetc4_psi_ops = { + .get_rss_table = enetc4_get_rss_table, + .set_rss_table = enetc4_set_rss_table, +}; + static int enetc4_pf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -712,6 +1026,7 @@ static int enetc4_pf_probe(struct pci_dev *pdev, "Couldn't map PF only space\n"); si->revision = enetc_get_ip_revision(&si->hw); + si->ops = &enetc4_psi_ops; err = enetc_get_driver_data(si); if (err) return dev_err_probe(dev, err, @@ -728,14 +1043,28 @@ static int enetc4_pf_probe(struct pci_dev *pdev, enetc_get_si_caps(si); - return enetc4_pf_netdev_create(si); + err = enetc4_pf_netdev_create(si); + if (err) + goto err_netdev_create; + + enetc_create_debugfs(si); + + return 0; + +err_netdev_create: + enetc4_pf_free(pf); + + return err; } static void enetc4_pf_remove(struct pci_dev *pdev) { struct enetc_si *si = pci_get_drvdata(pdev); + struct enetc_pf *pf = enetc_si_priv(si); + enetc_remove_debugfs(si); enetc4_pf_netdev_destroy(si); + enetc4_pf_free(pf); } static const struct pci_device_id enetc4_pf_id_table[] = { diff --git a/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c index 20bfdf7fb4b4..3d5f31879d5c 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c @@ -60,6 +60,44 @@ void enetc_teardown_cbdr(struct enetc_cbdr *cbdr) } EXPORT_SYMBOL_GPL(enetc_teardown_cbdr); +int 
enetc4_setup_cbdr(struct enetc_si *si) +{ + struct ntmp_user *user = &si->ntmp_user; + struct device *dev = &si->pdev->dev; + struct enetc_hw *hw = &si->hw; + struct netc_cbdr_regs regs; + + user->cbdr_num = 1; + user->dev = dev; + user->ring = devm_kcalloc(dev, user->cbdr_num, + sizeof(struct netc_cbdr), GFP_KERNEL); + if (!user->ring) + return -ENOMEM; + + /* set CBDR cache attributes */ + enetc_wr(hw, ENETC_SICAR2, + ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT); + + regs.pir = hw->reg + ENETC_SICBDRPIR; + regs.cir = hw->reg + ENETC_SICBDRCIR; + regs.mr = hw->reg + ENETC_SICBDRMR; + regs.bar0 = hw->reg + ENETC_SICBDRBAR0; + regs.bar1 = hw->reg + ENETC_SICBDRBAR1; + regs.lenr = hw->reg + ENETC_SICBDRLENR; + + return ntmp_init_cbdr(user->ring, dev, ®s); +} +EXPORT_SYMBOL_GPL(enetc4_setup_cbdr); + +void enetc4_teardown_cbdr(struct enetc_si *si) +{ + struct ntmp_user *user = &si->ntmp_user; + + ntmp_free_cbdr(user->ring); + user->dev = NULL; +} +EXPORT_SYMBOL_GPL(enetc4_teardown_cbdr); + static void enetc_clean_cbdr(struct enetc_cbdr *ring) { struct enetc_cbd *dest_cbd; @@ -256,3 +294,15 @@ int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count) return enetc_cmd_rss_table(si, (u32 *)table, count, false); } EXPORT_SYMBOL_GPL(enetc_set_rss_table); + +int enetc4_get_rss_table(struct enetc_si *si, u32 *table, int count) +{ + return ntmp_rsst_query_entry(&si->ntmp_user, table, count); +} +EXPORT_SYMBOL_GPL(enetc4_get_rss_table); + +int enetc4_set_rss_table(struct enetc_si *si, const u32 *table, int count) +{ + return ntmp_rsst_update_entry(&si->ntmp_user, table, count); +} +EXPORT_SYMBOL_GPL(enetc4_set_rss_table); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c index ece3ae28ba82..d38cd36be4a6 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c @@ -141,7 +141,7 @@ static const struct { static const struct { int reg; - char name[ETH_GSTRING_LEN]; + char name[ETH_GSTRING_LEN] __nonstring; } enetc_port_counters[] = { { ENETC_PM_REOCT(0), "MAC rx ethernet octets" }, { ENETC_PM_RALN(0), "MAC rx alignment errors" }, @@ -264,7 +264,7 @@ static void enetc_get_strings(struct net_device *ndev, u32 stringset, u8 *data) break; for (i = 0; i < ARRAY_SIZE(enetc_port_counters); i++) - ethtool_puts(&data, enetc_port_counters[i].name); + ethtool_cpy(&data, enetc_port_counters[i].name); break; } @@ -625,6 +625,29 @@ static int enetc_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc, return 0; } +/* i.MX95 ENETC does not support RFS table, but we can use ingress port + * filter table to implement Wake-on-LAN filter or drop the matched flow, + * so the implementation will be different from enetc_get_rxnfc() and + * enetc_set_rxnfc(). Therefore, add enetc4_get_rxnfc() for ENETC v4 PF. 
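(Illustrative aside, not part of the patch: the RSS support enabled in this series steers each received flow through a hash-indexed indirection table. The sketch below uses the 64-entry size required by the new RSST helpers and the same modulo fill as enetc_setup_default_rss_table(); indexing the table with the low-order bits of the hash is the usual RSS convention and, like the four-ring count and the hash value, is an assumption of the example.)

/* rss_indir_demo.c - illustrative only.
 * Build: gcc -o rss_indir_demo rss_indir_demo.c
 */
#include <stdint.h>
#include <stdio.h>

#define RSS_TABLE_SIZE 64	/* matches RSST_ENTRY_NUM in this series */

int main(void)
{
	uint32_t indir[RSS_TABLE_SIZE];
	uint32_t flow_hash = 0x2f1c03a7;	/* stand-in for the HW hash */
	unsigned int num_rings = 4;		/* assumption: 4 RX rings */
	unsigned int i;

	/* default spread, mirrors rss_table[i] = i % num_groups */
	for (i = 0; i < RSS_TABLE_SIZE; i++)
		indir[i] = i % num_rings;

	/* the packet's flow hash picks a table entry, which names the ring */
	printf("hash 0x%08x -> entry %u -> ring %u\n", flow_hash,
	       flow_hash % RSS_TABLE_SIZE, indir[flow_hash % RSS_TABLE_SIZE]);
	return 0;
}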
+ */ +static int enetc4_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc, + u32 *rule_locs) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + + switch (rxnfc->cmd) { + case ETHTOOL_GRXRINGS: + rxnfc->data = priv->num_rx_rings; + break; + case ETHTOOL_GRXFH: + return enetc_get_rsshash(rxnfc); + default: + return -EOPNOTSUPP; + } + + return 0; +} + static int enetc_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc) { struct enetc_ndev_priv *priv = netdev_priv(ndev); @@ -677,36 +700,53 @@ static u32 enetc_get_rxfh_indir_size(struct net_device *ndev) return priv->si->num_rss; } +static int enetc_get_rss_key_base(struct enetc_si *si) +{ + if (is_enetc_rev1(si)) + return ENETC_PRSSK(0); + + return ENETC4_PRSSKR(0); +} + +static void enetc_get_rss_key(struct enetc_si *si, const u8 *key) +{ + int base = enetc_get_rss_key_base(si); + struct enetc_hw *hw = &si->hw; + int i; + + for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / 4; i++) + ((u32 *)key)[i] = enetc_port_rd(hw, base + i * 4); +} + static int enetc_get_rxfh(struct net_device *ndev, struct ethtool_rxfh_param *rxfh) { struct enetc_ndev_priv *priv = netdev_priv(ndev); - struct enetc_hw *hw = &priv->si->hw; - int err = 0, i; + struct enetc_si *si = priv->si; + int err = 0; /* return hash function */ rxfh->hfunc = ETH_RSS_HASH_TOP; /* return hash key */ - if (rxfh->key && hw->port) - for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / 4; i++) - ((u32 *)rxfh->key)[i] = enetc_port_rd(hw, - ENETC_PRSSK(i)); + if (rxfh->key && enetc_si_is_pf(si)) + enetc_get_rss_key(si, rxfh->key); /* return RSS table */ if (rxfh->indir) - err = enetc_get_rss_table(priv->si, rxfh->indir, - priv->si->num_rss); + err = si->ops->get_rss_table(si, rxfh->indir, si->num_rss); return err; } -void enetc_set_rss_key(struct enetc_hw *hw, const u8 *bytes) +void enetc_set_rss_key(struct enetc_si *si, const u8 *bytes) { + int base = enetc_get_rss_key_base(si); + struct enetc_hw *hw = &si->hw; int i; for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / 4; i++) - enetc_port_wr(hw, ENETC_PRSSK(i), ((u32 *)bytes)[i]); + enetc_port_wr(hw, base + i * 4, ((u32 *)bytes)[i]); } EXPORT_SYMBOL_GPL(enetc_set_rss_key); @@ -715,17 +755,16 @@ static int enetc_set_rxfh(struct net_device *ndev, struct netlink_ext_ack *extack) { struct enetc_ndev_priv *priv = netdev_priv(ndev); - struct enetc_hw *hw = &priv->si->hw; + struct enetc_si *si = priv->si; int err = 0; /* set hash key, if PF */ - if (rxfh->key && hw->port) - enetc_set_rss_key(hw, rxfh->key); + if (rxfh->key && enetc_si_is_pf(si)) + enetc_set_rss_key(si, rxfh->key); /* set RSS table */ if (rxfh->indir) - err = enetc_set_rss_table(priv->si, rxfh->indir, - priv->si->num_rss); + err = si->ops->set_rss_table(si, rxfh->indir, si->num_rss); return err; } @@ -1240,6 +1279,11 @@ const struct ethtool_ops enetc4_pf_ethtool_ops = { .set_wol = enetc_set_wol, .get_pauseparam = enetc_get_pauseparam, .set_pauseparam = enetc_set_pauseparam, + .get_rxnfc = enetc4_get_rxnfc, + .get_rxfh_key_size = enetc_get_rxfh_key_size, + .get_rxfh_indir_size = enetc_get_rxfh_indir_size, + .get_rxfh = enetc_get_rxfh, + .set_rxfh = enetc_set_rxfh, }; void enetc_set_ethtool_ops(struct net_device *ndev) diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c index 203862ec1114..f63a29e2e031 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c @@ -72,30 +72,6 @@ static void enetc_set_isol_vlan(struct enetc_hw *hw, int si, u16 vlan, u8 qos) 
enetc_port_wr(hw, ENETC_PSIVLANR(si), val); } -static int enetc_mac_addr_hash_idx(const u8 *addr) -{ - u64 fold = __swab64(ether_addr_to_u64(addr)) >> 16; - u64 mask = 0; - int res = 0; - int i; - - for (i = 0; i < 8; i++) - mask |= BIT_ULL(i * 6); - - for (i = 0; i < 6; i++) - res |= (hweight64(fold & (mask << i)) & 0x1) << i; - - return res; -} - -static void enetc_reset_mac_addr_filter(struct enetc_mac_filter *filter) -{ - filter->mac_addr_cnt = 0; - - bitmap_zero(filter->mac_hash_table, - ENETC_MADDR_HASH_TBL_SZ); -} - static void enetc_add_mac_addr_em_filter(struct enetc_mac_filter *filter, const unsigned char *addr) { @@ -104,16 +80,6 @@ static void enetc_add_mac_addr_em_filter(struct enetc_mac_filter *filter, filter->mac_addr_cnt++; } -static void enetc_add_mac_addr_ht_filter(struct enetc_mac_filter *filter, - const unsigned char *addr) -{ - int idx = enetc_mac_addr_hash_idx(addr); - - /* add hash table entry */ - __set_bit(idx, filter->mac_hash_table); - filter->mac_addr_cnt++; -} - static void enetc_clear_mac_ht_flt(struct enetc_si *si, int si_idx, int type) { bool err = si->errata & ENETC_ERR_UCMCSWP; @@ -250,67 +216,6 @@ static void enetc_pf_set_rx_mode(struct net_device *ndev) enetc_port_wr(hw, ENETC_PSIPMR, psipmr); } -static void enetc_set_vlan_ht_filter(struct enetc_hw *hw, int si_idx, - unsigned long hash) -{ - enetc_port_wr(hw, ENETC_PSIVHFR0(si_idx), lower_32_bits(hash)); - enetc_port_wr(hw, ENETC_PSIVHFR1(si_idx), upper_32_bits(hash)); -} - -static int enetc_vid_hash_idx(unsigned int vid) -{ - int res = 0; - int i; - - for (i = 0; i < 6; i++) - res |= (hweight8(vid & (BIT(i) | BIT(i + 6))) & 0x1) << i; - - return res; -} - -static void enetc_sync_vlan_ht_filter(struct enetc_pf *pf, bool rehash) -{ - int i; - - if (rehash) { - bitmap_zero(pf->vlan_ht_filter, ENETC_VLAN_HT_SIZE); - - for_each_set_bit(i, pf->active_vlans, VLAN_N_VID) { - int hidx = enetc_vid_hash_idx(i); - - __set_bit(hidx, pf->vlan_ht_filter); - } - } - - enetc_set_vlan_ht_filter(&pf->si->hw, 0, *pf->vlan_ht_filter); -} - -static int enetc_vlan_rx_add_vid(struct net_device *ndev, __be16 prot, u16 vid) -{ - struct enetc_ndev_priv *priv = netdev_priv(ndev); - struct enetc_pf *pf = enetc_si_priv(priv->si); - int idx; - - __set_bit(vid, pf->active_vlans); - - idx = enetc_vid_hash_idx(vid); - if (!__test_and_set_bit(idx, pf->vlan_ht_filter)) - enetc_sync_vlan_ht_filter(pf, false); - - return 0; -} - -static int enetc_vlan_rx_del_vid(struct net_device *ndev, __be16 prot, u16 vid) -{ - struct enetc_ndev_priv *priv = netdev_priv(ndev); - struct enetc_pf *pf = enetc_si_priv(priv->si); - - __clear_bit(vid, pf->active_vlans); - enetc_sync_vlan_ht_filter(pf, true); - - return 0; -} - static void enetc_set_loopback(struct net_device *ndev, bool en) { struct enetc_ndev_priv *priv = netdev_priv(ndev); @@ -549,7 +454,6 @@ static void enetc_mac_enable(struct enetc_si *si, bool en) static void enetc_configure_port(struct enetc_pf *pf) { - u8 hash_key[ENETC_RSSHASH_KEY_SIZE]; struct enetc_hw *hw = &pf->si->hw; enetc_configure_port_mac(pf->si); @@ -557,8 +461,7 @@ static void enetc_configure_port(struct enetc_pf *pf) enetc_port_si_configure(pf->si); /* set up hash key */ - get_random_bytes(hash_key, ENETC_RSSHASH_KEY_SIZE); - enetc_set_rss_key(hw, hash_key); + enetc_set_default_rss_key(pf); /* split up RFS entries */ enetc_port_assign_rfs_entries(pf->si); @@ -728,6 +631,8 @@ static const struct net_device_ops enetc_ndev_ops = { .ndo_setup_tc = enetc_pf_setup_tc, .ndo_bpf = enetc_setup_bpf, .ndo_xdp_xmit = enetc_xdp_xmit, + 
.ndo_hwtstamp_get = enetc_hwtstamp_get, + .ndo_hwtstamp_set = enetc_hwtstamp_set, }; static struct phylink_pcs * @@ -939,6 +844,11 @@ static int enetc_pf_register_with_ierb(struct pci_dev *pdev) return enetc_ierb_register_pf(ierb_pdev, pdev); } +static const struct enetc_si_ops enetc_psi_ops = { + .get_rss_table = enetc_get_rss_table, + .set_rss_table = enetc_set_rss_table, +}; + static struct enetc_si *enetc_psi_create(struct pci_dev *pdev) { struct enetc_si *si; @@ -958,6 +868,7 @@ static struct enetc_si *enetc_psi_create(struct pci_dev *pdev) } si->revision = enetc_get_ip_revision(&si->hw); + si->ops = &enetc_psi_ops; err = enetc_get_driver_data(si); if (err) { dev_err(&pdev->dev, "Could not get PF driver data\n"); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.h b/drivers/net/ethernet/freescale/enetc/enetc_pf.h index a26a12863855..ae407e9e9ee7 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_pf.h +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.h @@ -5,19 +5,8 @@ #include <linux/phylink.h> #define ENETC_PF_NUM_RINGS 8 - -enum enetc_mac_addr_type {UC, MC, MADDR_TYPE}; #define ENETC_MAX_NUM_MAC_FLT ((ENETC_MAX_NUM_VFS + 1) * MADDR_TYPE) -#define ENETC_MADDR_HASH_TBL_SZ 64 -struct enetc_mac_filter { - union { - char mac_addr[ETH_ALEN]; - DECLARE_BITMAP(mac_hash_table, ENETC_MADDR_HASH_TBL_SZ); - }; - int mac_addr_cnt; -}; - #define ENETC_VLAN_HT_SIZE 64 enum enetc_vf_flags { @@ -34,6 +23,7 @@ struct enetc_port_caps { int num_msix; int num_rx_bdr; int num_tx_bdr; + int mac_filter_num; }; struct enetc_pf; @@ -71,6 +61,8 @@ struct enetc_pf { struct enetc_port_caps caps; const struct enetc_pf_ops *ops; + + int num_mfe; /* number of mac address filter table entries */ }; #define phylink_to_enetc_pf(config) \ diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c b/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c index 3fd9b0727875..edf14a95cab7 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c @@ -128,14 +128,14 @@ void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev, if (si->hw_features & ENETC_SI_F_LSO) priv->active_offloads |= ENETC_F_LSO; - /* TODO: currently, i.MX95 ENETC driver does not support advanced features */ - if (!is_enetc_rev1(si)) { - ndev->hw_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_LOOPBACK); - goto end; + if (si->num_rss) { + ndev->hw_features |= NETIF_F_RXHASH; + ndev->features |= NETIF_F_RXHASH; } - if (si->num_rss) - ndev->hw_features |= NETIF_F_RXHASH; + /* TODO: currently, i.MX95 ENETC driver does not support advanced features */ + if (!is_enetc_rev1(si)) + goto end; ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_RX_SG | @@ -341,5 +341,86 @@ void enetc_phylink_destroy(struct enetc_ndev_priv *priv) } EXPORT_SYMBOL_GPL(enetc_phylink_destroy); +void enetc_set_default_rss_key(struct enetc_pf *pf) +{ + u8 hash_key[ENETC_RSSHASH_KEY_SIZE] = {0}; + + /* set up hash key */ + get_random_bytes(hash_key, ENETC_RSSHASH_KEY_SIZE); + enetc_set_rss_key(pf->si, hash_key); +} +EXPORT_SYMBOL_GPL(enetc_set_default_rss_key); + +static int enetc_vid_hash_idx(unsigned int vid) +{ + int res = 0; + int i; + + for (i = 0; i < 6; i++) + res |= (hweight8(vid & (BIT(i) | BIT(i + 6))) & 0x1) << i; + + return res; +} + +static void enetc_refresh_vlan_ht_filter(struct enetc_pf *pf) +{ + int i; + + bitmap_zero(pf->vlan_ht_filter, ENETC_VLAN_HT_SIZE); + for_each_set_bit(i, pf->active_vlans, 
VLAN_N_VID) { + int hidx = enetc_vid_hash_idx(i); + + __set_bit(hidx, pf->vlan_ht_filter); + } +} + +static void enetc_set_si_vlan_ht_filter(struct enetc_si *si, + int si_id, u64 hash) +{ + struct enetc_hw *hw = &si->hw; + int high_reg_off, low_reg_off; + + if (is_enetc_rev1(si)) { + low_reg_off = ENETC_PSIVHFR0(si_id); + high_reg_off = ENETC_PSIVHFR1(si_id); + } else { + low_reg_off = ENETC4_PSIVHFR0(si_id); + high_reg_off = ENETC4_PSIVHFR1(si_id); + } + + enetc_port_wr(hw, low_reg_off, lower_32_bits(hash)); + enetc_port_wr(hw, high_reg_off, upper_32_bits(hash)); +} + +int enetc_vlan_rx_add_vid(struct net_device *ndev, __be16 prot, u16 vid) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_pf *pf = enetc_si_priv(priv->si); + int idx; + + __set_bit(vid, pf->active_vlans); + + idx = enetc_vid_hash_idx(vid); + if (!__test_and_set_bit(idx, pf->vlan_ht_filter)) + enetc_set_si_vlan_ht_filter(pf->si, 0, *pf->vlan_ht_filter); + + return 0; +} +EXPORT_SYMBOL_GPL(enetc_vlan_rx_add_vid); + +int enetc_vlan_rx_del_vid(struct net_device *ndev, __be16 prot, u16 vid) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_pf *pf = enetc_si_priv(priv->si); + + if (__test_and_clear_bit(vid, pf->active_vlans)) { + enetc_refresh_vlan_ht_filter(pf); + enetc_set_si_vlan_ht_filter(pf->si, 0, *pf->vlan_ht_filter); + } + + return 0; +} +EXPORT_SYMBOL_GPL(enetc_vlan_rx_del_vid); + MODULE_DESCRIPTION("NXP ENETC PF common functionality driver"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf_common.h b/drivers/net/ethernet/freescale/enetc/enetc_pf_common.h index 48f55ee743ad..96d4840a3107 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_pf_common.h +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf_common.h @@ -12,6 +12,9 @@ void enetc_mdiobus_destroy(struct enetc_pf *pf); int enetc_phylink_create(struct enetc_ndev_priv *priv, struct device_node *node, const struct phylink_mac_ops *ops); void enetc_phylink_destroy(struct enetc_ndev_priv *priv); +void enetc_set_default_rss_key(struct enetc_pf *pf); +int enetc_vlan_rx_add_vid(struct net_device *ndev, __be16 prot, u16 vid); +int enetc_vlan_rx_del_vid(struct net_device *ndev, __be16 prot, u16 vid); static inline u16 enetc_get_ip_revision(struct enetc_hw *hw) { diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c index 3768752b6008..6c4b374bcb0e 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c @@ -121,6 +121,8 @@ static const struct net_device_ops enetc_ndev_ops = { .ndo_set_features = enetc_vf_set_features, .ndo_eth_ioctl = enetc_ioctl, .ndo_setup_tc = enetc_vf_setup_tc, + .ndo_hwtstamp_get = enetc_hwtstamp_get, + .ndo_hwtstamp_set = enetc_hwtstamp_set, }; static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev, @@ -155,13 +157,20 @@ static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev, ndev->vlan_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6; - if (si->num_rss) + if (si->num_rss) { ndev->hw_features |= NETIF_F_RXHASH; + ndev->features |= NETIF_F_RXHASH; + } /* pick up primary MAC address from SI */ enetc_load_primary_mac_addr(&si->hw, ndev); } +static const struct enetc_si_ops enetc_vsi_ops = { + .get_rss_table = enetc_get_rss_table, + .set_rss_table = enetc_set_rss_table, +}; + static int enetc_vf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -176,6 +185,7 @@ static int 
enetc_vf_probe(struct pci_dev *pdev, si = pci_get_drvdata(pdev); si->revision = ENETC_REV_1_0; + si->ops = &enetc_vsi_ops; err = enetc_get_driver_data(si); if (err) { dev_err_probe(&pdev->dev, err, diff --git a/drivers/net/ethernet/freescale/enetc/ntmp.c b/drivers/net/ethernet/freescale/enetc/ntmp.c new file mode 100644 index 000000000000..ba32c1bbd9e1 --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/ntmp.c @@ -0,0 +1,462 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* + * NETC NTMP (NETC Table Management Protocol) 2.0 Library + * Copyright 2025 NXP + */ + +#include <linux/dma-mapping.h> +#include <linux/fsl/netc_global.h> +#include <linux/iopoll.h> + +#include "ntmp_private.h" + +#define NETC_CBDR_TIMEOUT 1000 /* us */ +#define NETC_CBDR_DELAY_US 10 +#define NETC_CBDR_MR_EN BIT(31) + +#define NTMP_BASE_ADDR_ALIGN 128 +#define NTMP_DATA_ADDR_ALIGN 32 + +/* Define NTMP Table ID */ +#define NTMP_MAFT_ID 1 +#define NTMP_RSST_ID 3 + +/* Generic Update Actions for most tables */ +#define NTMP_GEN_UA_CFGEU BIT(0) +#define NTMP_GEN_UA_STSEU BIT(1) + +#define NTMP_ENTRY_ID_SIZE 4 +#define RSST_ENTRY_NUM 64 +#define RSST_STSE_DATA_SIZE(n) ((n) * 8) +#define RSST_CFGE_DATA_SIZE(n) (n) + +int ntmp_init_cbdr(struct netc_cbdr *cbdr, struct device *dev, + const struct netc_cbdr_regs *regs) +{ + int cbd_num = NETC_CBDR_BD_NUM; + size_t size; + + size = cbd_num * sizeof(union netc_cbd) + NTMP_BASE_ADDR_ALIGN; + cbdr->addr_base = dma_alloc_coherent(dev, size, &cbdr->dma_base, + GFP_KERNEL); + if (!cbdr->addr_base) + return -ENOMEM; + + cbdr->dma_size = size; + cbdr->bd_num = cbd_num; + cbdr->regs = *regs; + cbdr->dev = dev; + + /* The base address of the Control BD Ring must be 128 bytes aligned */ + cbdr->dma_base_align = ALIGN(cbdr->dma_base, NTMP_BASE_ADDR_ALIGN); + cbdr->addr_base_align = PTR_ALIGN(cbdr->addr_base, + NTMP_BASE_ADDR_ALIGN); + + cbdr->next_to_clean = 0; + cbdr->next_to_use = 0; + spin_lock_init(&cbdr->ring_lock); + + /* Step 1: Configure the base address of the Control BD Ring */ + netc_write(cbdr->regs.bar0, lower_32_bits(cbdr->dma_base_align)); + netc_write(cbdr->regs.bar1, upper_32_bits(cbdr->dma_base_align)); + + /* Step 2: Configure the producer index register */ + netc_write(cbdr->regs.pir, cbdr->next_to_clean); + + /* Step 3: Configure the consumer index register */ + netc_write(cbdr->regs.cir, cbdr->next_to_use); + + /* Step4: Configure the number of BDs of the Control BD Ring */ + netc_write(cbdr->regs.lenr, cbdr->bd_num); + + /* Step 5: Enable the Control BD Ring */ + netc_write(cbdr->regs.mr, NETC_CBDR_MR_EN); + + return 0; +} +EXPORT_SYMBOL_GPL(ntmp_init_cbdr); + +void ntmp_free_cbdr(struct netc_cbdr *cbdr) +{ + /* Disable the Control BD Ring */ + netc_write(cbdr->regs.mr, 0); + dma_free_coherent(cbdr->dev, cbdr->dma_size, cbdr->addr_base, + cbdr->dma_base); + memset(cbdr, 0, sizeof(*cbdr)); +} +EXPORT_SYMBOL_GPL(ntmp_free_cbdr); + +static int ntmp_get_free_cbd_num(struct netc_cbdr *cbdr) +{ + return (cbdr->next_to_clean - cbdr->next_to_use - 1 + + cbdr->bd_num) % cbdr->bd_num; +} + +static union netc_cbd *ntmp_get_cbd(struct netc_cbdr *cbdr, int index) +{ + return &((union netc_cbd *)(cbdr->addr_base_align))[index]; +} + +static void ntmp_clean_cbdr(struct netc_cbdr *cbdr) +{ + union netc_cbd *cbd; + int i; + + i = cbdr->next_to_clean; + while (netc_read(cbdr->regs.cir) != i) { + cbd = ntmp_get_cbd(cbdr, i); + memset(cbd, 0, sizeof(*cbd)); + i = (i + 1) % cbdr->bd_num; + } + + cbdr->next_to_clean = i; +} + +static int netc_xmit_ntmp_cmd(struct 
ntmp_user *user, union netc_cbd *cbd) +{ + union netc_cbd *cur_cbd; + struct netc_cbdr *cbdr; + int i, err; + u16 status; + u32 val; + + /* Currently only i.MX95 ENETC is supported, and it only has one + * command BD ring + */ + cbdr = &user->ring[0]; + + spin_lock_bh(&cbdr->ring_lock); + + if (unlikely(!ntmp_get_free_cbd_num(cbdr))) + ntmp_clean_cbdr(cbdr); + + i = cbdr->next_to_use; + cur_cbd = ntmp_get_cbd(cbdr, i); + *cur_cbd = *cbd; + dma_wmb(); + + /* Update producer index of both software and hardware */ + i = (i + 1) % cbdr->bd_num; + cbdr->next_to_use = i; + netc_write(cbdr->regs.pir, i); + + err = read_poll_timeout_atomic(netc_read, val, val == i, + NETC_CBDR_DELAY_US, NETC_CBDR_TIMEOUT, + true, cbdr->regs.cir); + if (unlikely(err)) + goto cbdr_unlock; + + dma_rmb(); + /* Get the writeback command BD, because the caller may need + * to check some other fields of the response header. + */ + *cbd = *cur_cbd; + + /* Check the writeback error status */ + status = le16_to_cpu(cbd->resp_hdr.error_rr) & NTMP_RESP_ERROR; + if (unlikely(status)) { + err = -EIO; + dev_err(user->dev, "Command BD error: 0x%04x\n", status); + } + + ntmp_clean_cbdr(cbdr); + dma_wmb(); + +cbdr_unlock: + spin_unlock_bh(&cbdr->ring_lock); + + return err; +} + +static int ntmp_alloc_data_mem(struct ntmp_dma_buf *data, void **buf_align) +{ + void *buf; + + buf = dma_alloc_coherent(data->dev, data->size + NTMP_DATA_ADDR_ALIGN, + &data->dma, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + data->buf = buf; + *buf_align = PTR_ALIGN(buf, NTMP_DATA_ADDR_ALIGN); + + return 0; +} + +static void ntmp_free_data_mem(struct ntmp_dma_buf *data) +{ + dma_free_coherent(data->dev, data->size + NTMP_DATA_ADDR_ALIGN, + data->buf, data->dma); +} + +static void ntmp_fill_request_hdr(union netc_cbd *cbd, dma_addr_t dma, + int len, int table_id, int cmd, + int access_method) +{ + dma_addr_t dma_align; + + memset(cbd, 0, sizeof(*cbd)); + dma_align = ALIGN(dma, NTMP_DATA_ADDR_ALIGN); + cbd->req_hdr.addr = cpu_to_le64(dma_align); + cbd->req_hdr.len = cpu_to_le32(len); + cbd->req_hdr.cmd = cmd; + cbd->req_hdr.access_method = FIELD_PREP(NTMP_ACCESS_METHOD, + access_method); + cbd->req_hdr.table_id = table_id; + cbd->req_hdr.ver_cci_rr = FIELD_PREP(NTMP_HDR_VERSION, + NTMP_HDR_VER2); + /* For NTMP version 2.0 or later version */ + cbd->req_hdr.npf = cpu_to_le32(NTMP_NPF); +} + +static void ntmp_fill_crd(struct ntmp_cmn_req_data *crd, u8 tblv, + u8 qa, u16 ua) +{ + crd->update_act = cpu_to_le16(ua); + crd->tblv_qact = NTMP_TBLV_QACT(tblv, qa); +} + +static void ntmp_fill_crd_eid(struct ntmp_req_by_eid *rbe, u8 tblv, + u8 qa, u16 ua, u32 entry_id) +{ + ntmp_fill_crd(&rbe->crd, tblv, qa, ua); + rbe->entry_id = cpu_to_le32(entry_id); +} + +static const char *ntmp_table_name(int tbl_id) +{ + switch (tbl_id) { + case NTMP_MAFT_ID: + return "MAC Address Filter Table"; + case NTMP_RSST_ID: + return "RSS Table"; + default: + return "Unknown Table"; + }; +} + +static int ntmp_delete_entry_by_id(struct ntmp_user *user, int tbl_id, + u8 tbl_ver, u32 entry_id, u32 req_len, + u32 resp_len) +{ + struct ntmp_dma_buf data = { + .dev = user->dev, + .size = max(req_len, resp_len), + }; + struct ntmp_req_by_eid *req; + union netc_cbd cbd; + int err; + + err = ntmp_alloc_data_mem(&data, (void **)&req); + if (err) + return err; + + ntmp_fill_crd_eid(req, tbl_ver, 0, 0, entry_id); + ntmp_fill_request_hdr(&cbd, data.dma, NTMP_LEN(req_len, resp_len), + tbl_id, NTMP_CMD_DELETE, NTMP_AM_ENTRY_ID); + + err = netc_xmit_ntmp_cmd(user, &cbd); + if (err) + dev_err(user->dev, 
+ "Failed to delete entry 0x%x of %s, err: %pe", + entry_id, ntmp_table_name(tbl_id), ERR_PTR(err)); + + ntmp_free_data_mem(&data); + + return err; +} + +static int ntmp_query_entry_by_id(struct ntmp_user *user, int tbl_id, + u32 len, struct ntmp_req_by_eid *req, + dma_addr_t dma, bool compare_eid) +{ + struct ntmp_cmn_resp_query *resp; + int cmd = NTMP_CMD_QUERY; + union netc_cbd cbd; + u32 entry_id; + int err; + + entry_id = le32_to_cpu(req->entry_id); + if (le16_to_cpu(req->crd.update_act)) + cmd = NTMP_CMD_QU; + + /* Request header */ + ntmp_fill_request_hdr(&cbd, dma, len, tbl_id, cmd, NTMP_AM_ENTRY_ID); + err = netc_xmit_ntmp_cmd(user, &cbd); + if (err) { + dev_err(user->dev, + "Failed to query entry 0x%x of %s, err: %pe\n", + entry_id, ntmp_table_name(tbl_id), ERR_PTR(err)); + return err; + } + + /* For a few tables, the first field of their response data is not + * entry_id, so directly return success. + */ + if (!compare_eid) + return 0; + + resp = (struct ntmp_cmn_resp_query *)req; + if (unlikely(le32_to_cpu(resp->entry_id) != entry_id)) { + dev_err(user->dev, + "%s: query EID 0x%x doesn't match response EID 0x%x\n", + ntmp_table_name(tbl_id), entry_id, le32_to_cpu(resp->entry_id)); + return -EIO; + } + + return 0; +} + +int ntmp_maft_add_entry(struct ntmp_user *user, u32 entry_id, + struct maft_entry_data *maft) +{ + struct ntmp_dma_buf data = { + .dev = user->dev, + .size = sizeof(struct maft_req_add), + }; + struct maft_req_add *req; + union netc_cbd cbd; + int err; + + err = ntmp_alloc_data_mem(&data, (void **)&req); + if (err) + return err; + + /* Set mac address filter table request data buffer */ + ntmp_fill_crd_eid(&req->rbe, user->tbl.maft_ver, 0, 0, entry_id); + req->keye = maft->keye; + req->cfge = maft->cfge; + + ntmp_fill_request_hdr(&cbd, data.dma, NTMP_LEN(data.size, 0), + NTMP_MAFT_ID, NTMP_CMD_ADD, NTMP_AM_ENTRY_ID); + err = netc_xmit_ntmp_cmd(user, &cbd); + if (err) + dev_err(user->dev, "Failed to add MAFT entry 0x%x, err: %pe\n", + entry_id, ERR_PTR(err)); + + ntmp_free_data_mem(&data); + + return err; +} +EXPORT_SYMBOL_GPL(ntmp_maft_add_entry); + +int ntmp_maft_query_entry(struct ntmp_user *user, u32 entry_id, + struct maft_entry_data *maft) +{ + struct ntmp_dma_buf data = { + .dev = user->dev, + .size = sizeof(struct maft_resp_query), + }; + struct maft_resp_query *resp; + struct ntmp_req_by_eid *req; + int err; + + err = ntmp_alloc_data_mem(&data, (void **)&req); + if (err) + return err; + + ntmp_fill_crd_eid(req, user->tbl.maft_ver, 0, 0, entry_id); + err = ntmp_query_entry_by_id(user, NTMP_MAFT_ID, + NTMP_LEN(sizeof(*req), data.size), + req, data.dma, true); + if (err) + goto end; + + resp = (struct maft_resp_query *)req; + maft->keye = resp->keye; + maft->cfge = resp->cfge; + +end: + ntmp_free_data_mem(&data); + + return err; +} +EXPORT_SYMBOL_GPL(ntmp_maft_query_entry); + +int ntmp_maft_delete_entry(struct ntmp_user *user, u32 entry_id) +{ + return ntmp_delete_entry_by_id(user, NTMP_MAFT_ID, user->tbl.maft_ver, + entry_id, NTMP_EID_REQ_LEN, 0); +} +EXPORT_SYMBOL_GPL(ntmp_maft_delete_entry); + +int ntmp_rsst_update_entry(struct ntmp_user *user, const u32 *table, + int count) +{ + struct ntmp_dma_buf data = {.dev = user->dev}; + struct rsst_req_update *req; + union netc_cbd cbd; + int err, i; + + if (count != RSST_ENTRY_NUM) + /* HW only takes in a full 64 entry table */ + return -EINVAL; + + data.size = struct_size(req, groups, count); + err = ntmp_alloc_data_mem(&data, (void **)&req); + if (err) + return err; + + /* Set the request data buffer */ + 
ntmp_fill_crd_eid(&req->rbe, user->tbl.rsst_ver, 0, + NTMP_GEN_UA_CFGEU | NTMP_GEN_UA_STSEU, 0); + for (i = 0; i < count; i++) + req->groups[i] = (u8)(table[i]); + + ntmp_fill_request_hdr(&cbd, data.dma, NTMP_LEN(data.size, 0), + NTMP_RSST_ID, NTMP_CMD_UPDATE, NTMP_AM_ENTRY_ID); + + err = netc_xmit_ntmp_cmd(user, &cbd); + if (err) + dev_err(user->dev, "Failed to update RSST entry, err: %pe\n", + ERR_PTR(err)); + + ntmp_free_data_mem(&data); + + return err; +} +EXPORT_SYMBOL_GPL(ntmp_rsst_update_entry); + +int ntmp_rsst_query_entry(struct ntmp_user *user, u32 *table, int count) +{ + struct ntmp_dma_buf data = {.dev = user->dev}; + struct ntmp_req_by_eid *req; + union netc_cbd cbd; + int err, i; + u8 *group; + + if (count != RSST_ENTRY_NUM) + /* HW only takes in a full 64 entry table */ + return -EINVAL; + + data.size = NTMP_ENTRY_ID_SIZE + RSST_STSE_DATA_SIZE(count) + + RSST_CFGE_DATA_SIZE(count); + err = ntmp_alloc_data_mem(&data, (void **)&req); + if (err) + return err; + + /* Set the request data buffer */ + ntmp_fill_crd_eid(req, user->tbl.rsst_ver, 0, 0, 0); + ntmp_fill_request_hdr(&cbd, data.dma, NTMP_LEN(sizeof(*req), data.size), + NTMP_RSST_ID, NTMP_CMD_QUERY, NTMP_AM_ENTRY_ID); + err = netc_xmit_ntmp_cmd(user, &cbd); + if (err) { + dev_err(user->dev, "Failed to query RSST entry, err: %pe\n", + ERR_PTR(err)); + goto end; + } + + group = (u8 *)req; + group += NTMP_ENTRY_ID_SIZE + RSST_STSE_DATA_SIZE(count); + for (i = 0; i < count; i++) + table[i] = group[i]; + +end: + ntmp_free_data_mem(&data); + + return err; +} +EXPORT_SYMBOL_GPL(ntmp_rsst_query_entry); + +MODULE_DESCRIPTION("NXP NETC Library"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/ethernet/freescale/enetc/ntmp_private.h b/drivers/net/ethernet/freescale/enetc/ntmp_private.h new file mode 100644 index 000000000000..34394e40fddd --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/ntmp_private.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* + * NTMP table request and response data buffer formats + * Copyright 2025 NXP + */ + +#ifndef __NTMP_PRIVATE_H +#define __NTMP_PRIVATE_H + +#include <linux/bitfield.h> +#include <linux/fsl/ntmp.h> + +#define NTMP_EID_REQ_LEN 8 +#define NETC_CBDR_BD_NUM 256 + +union netc_cbd { + struct { + __le64 addr; + __le32 len; +#define NTMP_RESP_LEN GENMASK(19, 0) +#define NTMP_REQ_LEN GENMASK(31, 20) +#define NTMP_LEN(req, resp) (FIELD_PREP(NTMP_REQ_LEN, (req)) | \ + ((resp) & NTMP_RESP_LEN)) + u8 cmd; +#define NTMP_CMD_DELETE BIT(0) +#define NTMP_CMD_UPDATE BIT(1) +#define NTMP_CMD_QUERY BIT(2) +#define NTMP_CMD_ADD BIT(3) +#define NTMP_CMD_QU (NTMP_CMD_QUERY | NTMP_CMD_UPDATE) + u8 access_method; +#define NTMP_ACCESS_METHOD GENMASK(7, 4) +#define NTMP_AM_ENTRY_ID 0 +#define NTMP_AM_EXACT_KEY 1 +#define NTMP_AM_SEARCH 2 +#define NTMP_AM_TERNARY_KEY 3 + u8 table_id; + u8 ver_cci_rr; +#define NTMP_HDR_VERSION GENMASK(5, 0) +#define NTMP_HDR_VER2 2 +#define NTMP_CCI BIT(6) +#define NTMP_RR BIT(7) + __le32 resv[3]; + __le32 npf; +#define NTMP_NPF BIT(15) + } req_hdr; /* NTMP Request Message Header Format */ + + struct { + __le32 resv0[3]; + __le16 num_matched; + __le16 error_rr; +#define NTMP_RESP_ERROR GENMASK(11, 0) +#define NTMP_RESP_RR BIT(15) + __le32 resv1[4]; + } resp_hdr; /* NTMP Response Message Header Format */ +}; + +struct ntmp_dma_buf { + struct device *dev; + size_t size; + void *buf; + dma_addr_t dma; +}; + +struct ntmp_cmn_req_data { + __le16 update_act; + u8 dbg_opt; + u8 tblv_qact; +#define NTMP_QUERY_ACT GENMASK(3, 0) +#define 
NTMP_TBL_VER GENMASK(7, 4) +#define NTMP_TBLV_QACT(v, a) (FIELD_PREP(NTMP_TBL_VER, (v)) | \ + ((a) & NTMP_QUERY_ACT)) +}; + +struct ntmp_cmn_resp_query { + __le32 entry_id; +}; + +/* Generic structure for request data by entry ID */ +struct ntmp_req_by_eid { + struct ntmp_cmn_req_data crd; + __le32 entry_id; +}; + +/* MAC Address Filter Table Request Data Buffer Format of Add action */ +struct maft_req_add { + struct ntmp_req_by_eid rbe; + struct maft_keye_data keye; + struct maft_cfge_data cfge; +}; + +/* MAC Address Filter Table Response Data Buffer Format of Query action */ +struct maft_resp_query { + __le32 entry_id; + struct maft_keye_data keye; + struct maft_cfge_data cfge; +}; + +/* RSS Table Request Data Buffer Format of Update action */ +struct rsst_req_update { + struct ntmp_req_by_eid rbe; + u8 groups[]; +}; + +#endif diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index deb35b38c976..bcbcad613512 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -2061,15 +2061,13 @@ static void gfar_timeout(struct net_device *dev, unsigned int txqueue) schedule_work(&priv->reset_task); } -static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr) +static int gfar_hwtstamp_set(struct net_device *netdev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) { - struct hwtstamp_config config; struct gfar_private *priv = netdev_priv(netdev); - if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) - return -EFAULT; - - switch (config.tx_type) { + switch (config->tx_type) { case HWTSTAMP_TX_OFF: priv->hwts_tx_en = 0; break; @@ -2082,7 +2080,7 @@ static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr) return -ERANGE; } - switch (config.rx_filter) { + switch (config->rx_filter) { case HWTSTAMP_FILTER_NONE: if (priv->hwts_rx_en) { priv->hwts_rx_en = 0; @@ -2096,44 +2094,23 @@ static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr) priv->hwts_rx_en = 1; reset_gfar(netdev); } - config.rx_filter = HWTSTAMP_FILTER_ALL; + config->rx_filter = HWTSTAMP_FILTER_ALL; break; } - return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? - -EFAULT : 0; + return 0; } -static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr) +static int gfar_hwtstamp_get(struct net_device *netdev, + struct kernel_hwtstamp_config *config) { - struct hwtstamp_config config; struct gfar_private *priv = netdev_priv(netdev); - config.flags = 0; - config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; - config.rx_filter = (priv->hwts_rx_en ? - HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE); - - return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? - -EFAULT : 0; -} - -static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) -{ - struct phy_device *phydev = dev->phydev; + config->tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; + config->rx_filter = priv->hwts_rx_en ? 
HWTSTAMP_FILTER_ALL : + HWTSTAMP_FILTER_NONE; - if (!netif_running(dev)) - return -EINVAL; - - if (cmd == SIOCSHWTSTAMP) - return gfar_hwtstamp_set(dev, rq); - if (cmd == SIOCGHWTSTAMP) - return gfar_hwtstamp_get(dev, rq); - - if (!phydev) - return -ENODEV; - - return phy_mii_ioctl(phydev, rq, cmd); + return 0; } /* Interrupt Handler for Transmit complete */ @@ -3174,7 +3151,7 @@ static const struct net_device_ops gfar_netdev_ops = { .ndo_set_features = gfar_set_features, .ndo_set_rx_mode = gfar_set_multi, .ndo_tx_timeout = gfar_timeout, - .ndo_eth_ioctl = gfar_ioctl, + .ndo_eth_ioctl = phy_do_ioctl_running, .ndo_get_stats64 = gfar_get_stats64, .ndo_change_carrier = fixed_phy_change_carrier, .ndo_set_mac_address = gfar_set_mac_addr, @@ -3182,6 +3159,8 @@ static const struct net_device_ops gfar_netdev_ops = { #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = gfar_netpoll, #endif + .ndo_hwtstamp_get = gfar_hwtstamp_get, + .ndo_hwtstamp_set = gfar_hwtstamp_set, }; /* Set up the ethernet device structure, private data, diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c index eae1a7595a69..3c1da0cf3f61 100644 --- a/drivers/net/ethernet/google/gve/gve_ethtool.c +++ b/drivers/net/ethernet/google/gve/gve_ethtool.c @@ -67,7 +67,7 @@ static const char gve_gstrings_tx_stats[][ETH_GSTRING_LEN] = { "tx_xsk_sent[%u]", "tx_xdp_xmit[%u]", "tx_xdp_xmit_errors[%u]" }; -static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] = { +static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] __nonstring_array = { "adminq_prod_cnt", "adminq_cmd_fail", "adminq_timeouts", "adminq_describe_device_cnt", "adminq_cfg_device_resources_cnt", "adminq_register_page_list_cnt", "adminq_unregister_page_list_cnt", @@ -113,7 +113,7 @@ static void gve_get_strings(struct net_device *netdev, u32 stringset, u8 *data) i); for (i = 0; i < ARRAY_SIZE(gve_gstrings_adminq_stats); i++) - ethtool_puts(&s, gve_gstrings_adminq_stats[i]); + ethtool_cpy(&s, gve_gstrings_adminq_stats[i]); break; diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c index c3791cf23c87..e1ffbd561fac 100644 --- a/drivers/net/ethernet/google/gve/gve_main.c +++ b/drivers/net/ethernet/google/gve/gve_main.c @@ -1830,7 +1830,7 @@ static void gve_turndown(struct gve_priv *priv) /* Stop tx queues */ netif_tx_disable(priv->dev); - xdp_features_clear_redirect_target(priv->dev); + xdp_features_clear_redirect_target_locked(priv->dev); gve_clear_napi_enabled(priv); gve_clear_report_stats(priv); @@ -1902,7 +1902,7 @@ static void gve_turnup(struct gve_priv *priv) } if (priv->tx_cfg.num_xdp_queues && gve_supports_xdp_xmit(priv)) - xdp_features_set_redirect_target(priv->dev, false); + xdp_features_set_redirect_target_locked(priv->dev, false); gve_set_napi_enabled(priv); } @@ -2185,7 +2185,7 @@ static void gve_set_netdev_xdp_features(struct gve_priv *priv) xdp_features = 0; } - xdp_set_features_flag(priv->dev, xdp_features); + xdp_set_features_flag_locked(priv->dev, xdp_features); } static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device) @@ -2659,6 +2659,9 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto abort_with_wq; + if (!gve_is_gqi(priv) && !gve_is_qpl(priv)) + dev->netmem_tx = true; + err = register_netdev(dev); if (err) goto abort_with_gve_init; diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c index 2eba868d8037..a27f1574a733 100644 --- 
a/drivers/net/ethernet/google/gve/gve_tx_dqo.c +++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c @@ -660,7 +660,8 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx, goto err; dma_unmap_len_set(pkt, len[pkt->num_bufs], len); - dma_unmap_addr_set(pkt, dma[pkt->num_bufs], addr); + netmem_dma_unmap_addr_set(skb_frag_netmem(frag), pkt, + dma[pkt->num_bufs], addr); ++pkt->num_bufs; gve_tx_fill_pkt_desc_dqo(tx, desc_idx, skb, len, addr, @@ -1038,8 +1039,9 @@ static void gve_unmap_packet(struct device *dev, dma_unmap_single(dev, dma_unmap_addr(pkt, dma[0]), dma_unmap_len(pkt, len[0]), DMA_TO_DEVICE); for (i = 1; i < pkt->num_bufs; i++) { - dma_unmap_page(dev, dma_unmap_addr(pkt, dma[i]), - dma_unmap_len(pkt, len[i]), DMA_TO_DEVICE); + netmem_dma_unmap_page_attrs(dev, dma_unmap_addr(pkt, dma[i]), + dma_unmap_len(pkt, len[i]), + DMA_TO_DEVICE, 0); } pkt->num_bufs = 0; } diff --git a/drivers/net/ethernet/huawei/Kconfig b/drivers/net/ethernet/huawei/Kconfig index c05fce15eb51..7d0feb1da158 100644 --- a/drivers/net/ethernet/huawei/Kconfig +++ b/drivers/net/ethernet/huawei/Kconfig @@ -16,5 +16,6 @@ config NET_VENDOR_HUAWEI if NET_VENDOR_HUAWEI source "drivers/net/ethernet/huawei/hinic/Kconfig" +source "drivers/net/ethernet/huawei/hinic3/Kconfig" endif # NET_VENDOR_HUAWEI diff --git a/drivers/net/ethernet/huawei/Makefile b/drivers/net/ethernet/huawei/Makefile index 2549ad5afe6d..59865b882879 100644 --- a/drivers/net/ethernet/huawei/Makefile +++ b/drivers/net/ethernet/huawei/Makefile @@ -4,3 +4,4 @@ # obj-$(CONFIG_HINIC) += hinic/ +obj-$(CONFIG_HINIC3) += hinic3/ diff --git a/drivers/net/ethernet/huawei/hinic3/Kconfig b/drivers/net/ethernet/huawei/hinic3/Kconfig new file mode 100644 index 000000000000..ce4331d1387b --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/Kconfig @@ -0,0 +1,20 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Huawei driver configuration +# + +config HINIC3 + tristate "Huawei 3rd generation network adapters (HINIC3) support" + # Fields of HW and management structures are little endian and are + # currently not converted + depends on !CPU_BIG_ENDIAN + depends on X86 || ARM64 || COMPILE_TEST + depends on PCI_MSI && 64BIT + select AUXILIARY_BUS + select PAGE_POOL + help + This driver supports HiNIC 3rd gen Network Adapter (HINIC3). + The driver is supported on X86_64 and ARM64 little endian. + + To compile this driver as a module, choose M here. + The module will be called hinic3. diff --git a/drivers/net/ethernet/huawei/hinic3/Makefile b/drivers/net/ethernet/huawei/hinic3/Makefile new file mode 100644 index 000000000000..509dfbfb0e96 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/Makefile @@ -0,0 +1,21 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. + +obj-$(CONFIG_HINIC3) += hinic3.o + +hinic3-objs := hinic3_common.o \ + hinic3_hw_cfg.o \ + hinic3_hw_comm.o \ + hinic3_hwdev.o \ + hinic3_hwif.o \ + hinic3_irq.o \ + hinic3_lld.o \ + hinic3_main.o \ + hinic3_mbox.o \ + hinic3_netdev_ops.o \ + hinic3_nic_cfg.o \ + hinic3_nic_io.o \ + hinic3_queue_common.o \ + hinic3_rx.o \ + hinic3_tx.o \ + hinic3_wq.o diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_common.c b/drivers/net/ethernet/huawei/hinic3/hinic3_common.c new file mode 100644 index 000000000000..0aa42068728c --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_common.c @@ -0,0 +1,53 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. 
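The first helper added in this file over-allocates a coherent DMA buffer by the alignment amount whenever the initial allocation does not already land on the requested boundary, and records both the original and the aligned addresses so the buffer can still be freed correctly. A minimal usage sketch follows; the caller, buffer size and purpose are hypothetical and only illustrate the calling convention of the two helpers defined below.

#include <linux/device.h>

#include "hinic3_common.h"

/* Hypothetical caller: allocate a small table aligned to the device's
 * minimum page size (4 KiB) and release it again. The aligned view is
 * align_vaddr/align_paddr; ori_vaddr/ori_paddr are what must be freed.
 */
static int example_alloc_ctx_table(struct device *dev)
{
	struct hinic3_dma_addr_align ctx_mem;
	int err;

	err = hinic3_dma_zalloc_coherent_align(dev, 2048, HINIC3_MIN_PAGE_SIZE,
					       GFP_KERNEL, &ctx_mem);
	if (err)
		return err;

	/* ... program ctx_mem.align_paddr into the hardware here ... */

	hinic3_dma_free_coherent_align(dev, &ctx_mem);
	return 0;
}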
+ +#include <linux/delay.h> +#include <linux/dma-mapping.h> + +#include "hinic3_common.h" + +int hinic3_dma_zalloc_coherent_align(struct device *dev, u32 size, u32 align, + gfp_t flag, + struct hinic3_dma_addr_align *mem_align) +{ + dma_addr_t paddr, align_paddr; + void *vaddr, *align_vaddr; + u32 real_size = size; + + vaddr = dma_alloc_coherent(dev, real_size, &paddr, flag); + if (!vaddr) + return -ENOMEM; + + align_paddr = ALIGN(paddr, align); + if (align_paddr == paddr) { + align_vaddr = vaddr; + goto out; + } + + dma_free_coherent(dev, real_size, vaddr, paddr); + + /* realloc memory for align */ + real_size = size + align; + vaddr = dma_alloc_coherent(dev, real_size, &paddr, flag); + if (!vaddr) + return -ENOMEM; + + align_paddr = ALIGN(paddr, align); + align_vaddr = vaddr + (align_paddr - paddr); + +out: + mem_align->real_size = real_size; + mem_align->ori_vaddr = vaddr; + mem_align->ori_paddr = paddr; + mem_align->align_vaddr = align_vaddr; + mem_align->align_paddr = align_paddr; + + return 0; +} + +void hinic3_dma_free_coherent_align(struct device *dev, + struct hinic3_dma_addr_align *mem_align) +{ + dma_free_coherent(dev, mem_align->real_size, + mem_align->ori_vaddr, mem_align->ori_paddr); +} diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_common.h b/drivers/net/ethernet/huawei/hinic3/hinic3_common.h new file mode 100644 index 000000000000..bb795dace04c --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_common.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */ + +#ifndef _HINIC3_COMMON_H_ +#define _HINIC3_COMMON_H_ + +#include <linux/device.h> + +#define HINIC3_MIN_PAGE_SIZE 0x1000 + +struct hinic3_dma_addr_align { + u32 real_size; + + void *ori_vaddr; + dma_addr_t ori_paddr; + + void *align_vaddr; + dma_addr_t align_paddr; +}; + +int hinic3_dma_zalloc_coherent_align(struct device *dev, u32 size, u32 align, + gfp_t flag, + struct hinic3_dma_addr_align *mem_align); +void hinic3_dma_free_coherent_align(struct device *dev, + struct hinic3_dma_addr_align *mem_align); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.c b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.c new file mode 100644 index 000000000000..87d9450c30ca --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.c @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. + +#include <linux/device.h> + +#include "hinic3_hw_cfg.h" +#include "hinic3_hwdev.h" +#include "hinic3_hwif.h" +#include "hinic3_mbox.h" + +bool hinic3_support_nic(struct hinic3_hwdev *hwdev) +{ + return hwdev->cfg_mgmt->cap.supp_svcs_bitmap & + BIT(HINIC3_SERVICE_T_NIC); +} + +u16 hinic3_func_max_qnum(struct hinic3_hwdev *hwdev) +{ + return hwdev->cfg_mgmt->cap.nic_svc_cap.max_sqs; +} + +u8 hinic3_physical_port_id(struct hinic3_hwdev *hwdev) +{ + return hwdev->cfg_mgmt->cap.port_id; +} diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.h new file mode 100644 index 000000000000..e017b1ae9f05 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. 
*/ + +#ifndef _HINIC3_HW_CFG_H_ +#define _HINIC3_HW_CFG_H_ + +#include <linux/mutex.h> +#include <linux/pci.h> + +struct hinic3_hwdev; + +struct hinic3_irq { + u32 irq_id; + u16 msix_entry_idx; + bool allocated; +}; + +struct hinic3_irq_info { + struct hinic3_irq *irq; + u16 num_irq; + /* device max irq number */ + u16 num_irq_hw; + /* protect irq alloc and free */ + struct mutex irq_mutex; +}; + +struct hinic3_nic_service_cap { + u16 max_sqs; +}; + +/* Device capabilities */ +struct hinic3_dev_cap { + /* Bitmasks of services supported by device */ + u16 supp_svcs_bitmap; + /* Physical port */ + u8 port_id; + struct hinic3_nic_service_cap nic_svc_cap; +}; + +struct hinic3_cfg_mgmt_info { + struct hinic3_irq_info irq_info; + struct hinic3_dev_cap cap; +}; + +int hinic3_alloc_irqs(struct hinic3_hwdev *hwdev, u16 num, + struct msix_entry *alloc_arr, u16 *act_num); +void hinic3_free_irq(struct hinic3_hwdev *hwdev, u32 irq_id); + +bool hinic3_support_nic(struct hinic3_hwdev *hwdev); +u16 hinic3_func_max_qnum(struct hinic3_hwdev *hwdev); +u8 hinic3_physical_port_id(struct hinic3_hwdev *hwdev); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c new file mode 100644 index 000000000000..434696ce7dc2 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. + +#include <linux/delay.h> + +#include "hinic3_hw_comm.h" +#include "hinic3_hwdev.h" +#include "hinic3_hwif.h" +#include "hinic3_mbox.h" + +int hinic3_func_reset(struct hinic3_hwdev *hwdev, u16 func_id, u64 reset_flag) +{ + struct comm_cmd_func_reset func_reset = {}; + struct mgmt_msg_params msg_params = {}; + int err; + + func_reset.func_id = func_id; + func_reset.reset_flag = reset_flag; + + mgmt_msg_params_init_default(&msg_params, &func_reset, + sizeof(func_reset)); + + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM, + COMM_CMD_FUNC_RESET, &msg_params); + if (err || func_reset.head.status) { + dev_err(hwdev->dev, "Failed to reset func resources, reset_flag 0x%llx, err: %d, status: 0x%x\n", + reset_flag, err, func_reset.head.status); + return -EIO; + } + + return 0; +} diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h new file mode 100644 index 000000000000..c33a1c77da9c --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */ + +#ifndef _HINIC3_HW_COMM_H_ +#define _HINIC3_HW_COMM_H_ + +#include "hinic3_hw_intf.h" + +struct hinic3_hwdev; + +int hinic3_func_reset(struct hinic3_hwdev *hwdev, u16 func_id, u64 reset_flag); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h new file mode 100644 index 000000000000..22c84093efa2 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h @@ -0,0 +1,113 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. 
*/ + +#ifndef _HINIC3_HW_INTF_H_ +#define _HINIC3_HW_INTF_H_ + +#include <linux/bits.h> +#include <linux/types.h> + +#define MGMT_MSG_CMD_OP_SET 1 +#define MGMT_MSG_CMD_OP_GET 0 + +#define MGMT_STATUS_PF_SET_VF_ALREADY 0x4 +#define MGMT_STATUS_EXIST 0x6 +#define MGMT_STATUS_CMD_UNSUPPORTED 0xFF + +#define MGMT_MSG_POLLING_TIMEOUT 0 + +struct mgmt_msg_head { + u8 status; + u8 version; + u8 rsvd0[6]; +}; + +struct mgmt_msg_params { + const void *buf_in; + u32 in_size; + void *buf_out; + u32 expected_out_size; + u32 timeout_ms; +}; + +/* CMDQ MODULE_TYPE */ +enum mgmt_mod_type { + /* HW communication module */ + MGMT_MOD_COMM = 0, + /* L2NIC module */ + MGMT_MOD_L2NIC = 1, + /* Configuration module */ + MGMT_MOD_CFGM = 7, + MGMT_MOD_HILINK = 14, +}; + +static inline void mgmt_msg_params_init_default(struct mgmt_msg_params *msg_params, + void *inout_buf, u32 buf_size) +{ + msg_params->buf_in = inout_buf; + msg_params->buf_out = inout_buf; + msg_params->in_size = buf_size; + msg_params->expected_out_size = buf_size; + msg_params->timeout_ms = 0; +} + +/* COMM Commands between Driver to fw */ +enum comm_cmd { + /* Commands for clearing FLR and resources */ + COMM_CMD_FUNC_RESET = 0, + COMM_CMD_FEATURE_NEGO = 1, + COMM_CMD_FLUSH_DOORBELL = 2, + COMM_CMD_START_FLUSH = 3, + COMM_CMD_GET_GLOBAL_ATTR = 5, + COMM_CMD_SET_FUNC_SVC_USED_STATE = 7, + + /* Driver Configuration Commands */ + COMM_CMD_SET_CMDQ_CTXT = 20, + COMM_CMD_SET_VAT = 21, + COMM_CMD_CFG_PAGESIZE = 22, + COMM_CMD_CFG_MSIX_CTRL_REG = 23, + COMM_CMD_SET_CEQ_CTRL_REG = 24, + COMM_CMD_SET_DMA_ATTR = 25, +}; + +enum comm_func_reset_bits { + COMM_FUNC_RESET_BIT_FLUSH = BIT(0), + COMM_FUNC_RESET_BIT_MQM = BIT(1), + COMM_FUNC_RESET_BIT_SMF = BIT(2), + COMM_FUNC_RESET_BIT_PF_BW_CFG = BIT(3), + + COMM_FUNC_RESET_BIT_COMM = BIT(10), + /* clear mbox and aeq, The COMM_FUNC_RESET_BIT_COMM bit must be set */ + COMM_FUNC_RESET_BIT_COMM_MGMT_CH = BIT(11), + /* clear cmdq and ceq, The COMM_FUNC_RESET_BIT_COMM bit must be set */ + COMM_FUNC_RESET_BIT_COMM_CMD_CH = BIT(12), + COMM_FUNC_RESET_BIT_NIC = BIT(13), +}; + +struct comm_cmd_func_reset { + struct mgmt_msg_head head; + u16 func_id; + u16 rsvd1[3]; + u64 reset_flag; +}; + +#define COMM_MAX_FEATURE_QWORD 4 +struct comm_cmd_feature_nego { + struct mgmt_msg_head head; + u16 func_id; + u8 opcode; + u8 rsvd; + u64 s_feature[COMM_MAX_FEATURE_QWORD]; +}; + +/* Services supported by HW. HW uses these values when delivering events. + * HW supports multiple services that are not yet supported by driver + * (e.g. RoCE). + */ +enum hinic3_service_type { + HINIC3_SERVICE_T_NIC = 0, + /* MAX is only used by SW for array sizes. */ + HINIC3_SERVICE_T_MAX = 1, +}; + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c b/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c new file mode 100644 index 000000000000..6e8788a64925 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. + +#include "hinic3_hw_comm.h" +#include "hinic3_hwdev.h" +#include "hinic3_hwif.h" +#include "hinic3_mbox.h" +#include "hinic3_mgmt.h" + +int hinic3_init_hwdev(struct pci_dev *pdev) +{ + /* Completed by later submission due to LoC limit. */ + return -EFAULT; +} + +void hinic3_free_hwdev(struct hinic3_hwdev *hwdev) +{ + /* Completed by later submission due to LoC limit. 
*/ +} + +void hinic3_set_api_stop(struct hinic3_hwdev *hwdev) +{ + /* Completed by later submission due to LoC limit. */ +} diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.h new file mode 100644 index 000000000000..62e2745e9316 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */ + +#ifndef _HINIC3_HWDEV_H_ +#define _HINIC3_HWDEV_H_ + +#include <linux/auxiliary_bus.h> +#include <linux/pci.h> + +#include "hinic3_hw_intf.h" + +struct hinic3_cmdqs; +struct hinic3_hwif; + +enum hinic3_event_service_type { + HINIC3_EVENT_SRV_COMM = 0, + HINIC3_EVENT_SRV_NIC = 1 +}; + +#define HINIC3_SRV_EVENT_TYPE(svc, type) (((svc) << 16) | (type)) + +/* driver-specific data of pci_dev */ +struct hinic3_pcidev { + struct pci_dev *pdev; + struct hinic3_hwdev *hwdev; + /* Auxiliary devices */ + struct hinic3_adev *hadev[HINIC3_SERVICE_T_MAX]; + + void __iomem *cfg_reg_base; + void __iomem *intr_reg_base; + void __iomem *db_base; + u64 db_dwqe_len; + u64 db_base_phy; + + /* lock for attach/detach uld */ + struct mutex pdev_mutex; + unsigned long state; +}; + +struct hinic3_hwdev { + struct hinic3_pcidev *adapter; + struct pci_dev *pdev; + struct device *dev; + int dev_id; + struct hinic3_hwif *hwif; + struct hinic3_cfg_mgmt_info *cfg_mgmt; + struct hinic3_aeqs *aeqs; + struct hinic3_ceqs *ceqs; + struct hinic3_mbox *mbox; + struct hinic3_cmdqs *cmdqs; + struct workqueue_struct *workq; + /* protect channel init and uninit */ + spinlock_t channel_lock; + u64 features[COMM_MAX_FEATURE_QWORD]; + u32 wq_page_size; + u8 max_cmdq; + ulong func_state; +}; + +struct hinic3_event_info { + /* enum hinic3_event_service_type */ + u16 service; + u16 type; + u8 event_data[104]; +}; + +struct hinic3_adev { + struct auxiliary_device adev; + struct hinic3_hwdev *hwdev; + enum hinic3_service_type svc_type; + + void (*event)(struct auxiliary_device *adev, + struct hinic3_event_info *event); +}; + +int hinic3_init_hwdev(struct pci_dev *pdev); +void hinic3_free_hwdev(struct hinic3_hwdev *hwdev); + +void hinic3_set_api_stop(struct hinic3_hwdev *hwdev); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.c b/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.c new file mode 100644 index 000000000000..0865453bf0e7 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.c @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. + +#include <linux/bitfield.h> +#include <linux/device.h> +#include <linux/io.h> + +#include "hinic3_common.h" +#include "hinic3_hwdev.h" +#include "hinic3_hwif.h" + +void hinic3_set_msix_state(struct hinic3_hwdev *hwdev, u16 msix_idx, + enum hinic3_msix_state flag) +{ + /* Completed by later submission due to LoC limit. */ +} + +u16 hinic3_global_func_id(struct hinic3_hwdev *hwdev) +{ + return hwdev->hwif->attr.func_global_idx; +} diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.h new file mode 100644 index 000000000000..513c9680e6b6 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. 
*/ + +#ifndef _HINIC3_HWIF_H_ +#define _HINIC3_HWIF_H_ + +#include <linux/build_bug.h> +#include <linux/spinlock_types.h> + +struct hinic3_hwdev; + +enum hinic3_func_type { + HINIC3_FUNC_TYPE_VF = 1, +}; + +struct hinic3_db_area { + unsigned long *db_bitmap_array; + u32 db_max_areas; + /* protect doorbell area alloc and free */ + spinlock_t idx_lock; +}; + +struct hinic3_func_attr { + enum hinic3_func_type func_type; + u16 func_global_idx; + u16 global_vf_id_of_pf; + u16 num_irqs; + u16 num_sq; + u8 port_to_port_idx; + u8 pci_intf_idx; + u8 ppf_idx; + u8 num_aeqs; + u8 num_ceqs; + u8 msix_flex_en; +}; + +static_assert(sizeof(struct hinic3_func_attr) == 20); + +struct hinic3_hwif { + u8 __iomem *cfg_regs_base; + u64 db_base_phy; + u64 db_dwqe_len; + u8 __iomem *db_base; + struct hinic3_db_area db_area; + struct hinic3_func_attr attr; +}; + +enum hinic3_msix_state { + HINIC3_MSIX_ENABLE, + HINIC3_MSIX_DISABLE, +}; + +void hinic3_set_msix_state(struct hinic3_hwdev *hwdev, u16 msix_idx, + enum hinic3_msix_state flag); + +u16 hinic3_global_func_id(struct hinic3_hwdev *hwdev); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c b/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c new file mode 100644 index 000000000000..8b92eed25edf --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c @@ -0,0 +1,62 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. + +#include <linux/netdevice.h> + +#include "hinic3_hw_comm.h" +#include "hinic3_hwdev.h" +#include "hinic3_hwif.h" +#include "hinic3_nic_dev.h" +#include "hinic3_rx.h" +#include "hinic3_tx.h" + +static int hinic3_poll(struct napi_struct *napi, int budget) +{ + struct hinic3_irq_cfg *irq_cfg = + container_of(napi, struct hinic3_irq_cfg, napi); + struct hinic3_nic_dev *nic_dev; + bool busy = false; + int work_done; + + nic_dev = netdev_priv(irq_cfg->netdev); + + busy |= hinic3_tx_poll(irq_cfg->txq, budget); + + if (unlikely(!budget)) + return 0; + + work_done = hinic3_rx_poll(irq_cfg->rxq, budget); + busy |= work_done >= budget; + + if (busy) + return budget; + + if (likely(napi_complete_done(napi, work_done))) + hinic3_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx, + HINIC3_MSIX_ENABLE); + + return work_done; +} + +void qp_add_napi(struct hinic3_irq_cfg *irq_cfg) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(irq_cfg->netdev); + + netif_queue_set_napi(irq_cfg->netdev, irq_cfg->irq_id, + NETDEV_QUEUE_TYPE_RX, &irq_cfg->napi); + netif_queue_set_napi(irq_cfg->netdev, irq_cfg->irq_id, + NETDEV_QUEUE_TYPE_TX, &irq_cfg->napi); + netif_napi_add(nic_dev->netdev, &irq_cfg->napi, hinic3_poll); + napi_enable(&irq_cfg->napi); +} + +void qp_del_napi(struct hinic3_irq_cfg *irq_cfg) +{ + napi_disable(&irq_cfg->napi); + netif_queue_set_napi(irq_cfg->netdev, irq_cfg->irq_id, + NETDEV_QUEUE_TYPE_RX, NULL); + netif_queue_set_napi(irq_cfg->netdev, irq_cfg->irq_id, + NETDEV_QUEUE_TYPE_TX, NULL); + netif_stop_subqueue(irq_cfg->netdev, irq_cfg->irq_id); + netif_napi_del(&irq_cfg->napi); +} diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_lld.c b/drivers/net/ethernet/huawei/hinic3/hinic3_lld.c new file mode 100644 index 000000000000..4827326e6a59 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_lld.c @@ -0,0 +1,414 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. 
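The qp_add_napi()/qp_del_napi() helpers above register one NAPI context per queue pair and tie it to both the RX and TX queue, while hinic3_poll() re-arms the MSI-X vector only after napi_complete_done() succeeds. The matching per-queue interrupt handler is not part of this submission; the sketch below is a hypothetical example of how such a handler would typically pair with these helpers, masking the vector and deferring all work to NAPI.

#include <linux/interrupt.h>
#include <linux/netdevice.h>

#include "hinic3_hwif.h"
#include "hinic3_nic_dev.h"

/* Hypothetical per-queue-pair IRQ handler: disable the MSI-X vector and
 * schedule NAPI; hinic3_poll() re-enables the vector once it completes
 * under budget.
 */
static irqreturn_t example_qp_irq(int irq, void *data)
{
	struct hinic3_irq_cfg *irq_cfg = data;
	struct hinic3_nic_dev *nic_dev = netdev_priv(irq_cfg->netdev);

	hinic3_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx,
			      HINIC3_MSIX_DISABLE);
	napi_schedule(&irq_cfg->napi);

	return IRQ_HANDLED;
}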
+ +#include <linux/delay.h> +#include <linux/iopoll.h> + +#include "hinic3_hw_cfg.h" +#include "hinic3_hwdev.h" +#include "hinic3_lld.h" +#include "hinic3_mgmt.h" + +#define HINIC3_VF_PCI_CFG_REG_BAR 0 +#define HINIC3_PCI_INTR_REG_BAR 2 +#define HINIC3_PCI_DB_BAR 4 + +#define HINIC3_EVENT_POLL_SLEEP_US 1000 +#define HINIC3_EVENT_POLL_TIMEOUT_US 10000000 + +static struct hinic3_adev_device { + const char *name; +} hinic3_adev_devices[HINIC3_SERVICE_T_MAX] = { + [HINIC3_SERVICE_T_NIC] = { + .name = "nic", + }, +}; + +static bool hinic3_adev_svc_supported(struct hinic3_hwdev *hwdev, + enum hinic3_service_type svc_type) +{ + switch (svc_type) { + case HINIC3_SERVICE_T_NIC: + return hinic3_support_nic(hwdev); + default: + break; + } + + return false; +} + +static void hinic3_comm_adev_release(struct device *dev) +{ + struct hinic3_adev *hadev = container_of(dev, struct hinic3_adev, + adev.dev); + + kfree(hadev); +} + +static struct hinic3_adev *hinic3_add_one_adev(struct hinic3_hwdev *hwdev, + enum hinic3_service_type svc_type) +{ + struct hinic3_adev *hadev; + const char *svc_name; + int ret; + + hadev = kzalloc(sizeof(*hadev), GFP_KERNEL); + if (!hadev) + return NULL; + + svc_name = hinic3_adev_devices[svc_type].name; + hadev->adev.name = svc_name; + hadev->adev.id = hwdev->dev_id; + hadev->adev.dev.parent = hwdev->dev; + hadev->adev.dev.release = hinic3_comm_adev_release; + hadev->svc_type = svc_type; + hadev->hwdev = hwdev; + + ret = auxiliary_device_init(&hadev->adev); + if (ret) { + dev_err(hwdev->dev, "failed init adev %s %u\n", + svc_name, hwdev->dev_id); + kfree(hadev); + return NULL; + } + + ret = auxiliary_device_add(&hadev->adev); + if (ret) { + dev_err(hwdev->dev, "failed to add adev %s %u\n", + svc_name, hwdev->dev_id); + auxiliary_device_uninit(&hadev->adev); + return NULL; + } + + return hadev; +} + +static void hinic3_del_one_adev(struct hinic3_hwdev *hwdev, + enum hinic3_service_type svc_type) +{ + struct hinic3_pcidev *pci_adapter = hwdev->adapter; + struct hinic3_adev *hadev; + int timeout; + bool state; + + timeout = read_poll_timeout(test_and_set_bit, state, !state, + HINIC3_EVENT_POLL_SLEEP_US, + HINIC3_EVENT_POLL_TIMEOUT_US, + false, svc_type, &pci_adapter->state); + + hadev = pci_adapter->hadev[svc_type]; + auxiliary_device_delete(&hadev->adev); + auxiliary_device_uninit(&hadev->adev); + pci_adapter->hadev[svc_type] = NULL; + if (!timeout) + clear_bit(svc_type, &pci_adapter->state); +} + +static int hinic3_attach_aux_devices(struct hinic3_hwdev *hwdev) +{ + struct hinic3_pcidev *pci_adapter = hwdev->adapter; + enum hinic3_service_type svc_type; + + mutex_lock(&pci_adapter->pdev_mutex); + + for (svc_type = 0; svc_type < HINIC3_SERVICE_T_MAX; svc_type++) { + if (!hinic3_adev_svc_supported(hwdev, svc_type)) + continue; + + pci_adapter->hadev[svc_type] = hinic3_add_one_adev(hwdev, + svc_type); + if (!pci_adapter->hadev[svc_type]) + goto err_del_adevs; + } + mutex_unlock(&pci_adapter->pdev_mutex); + return 0; + +err_del_adevs: + while (svc_type > 0) { + svc_type--; + if (pci_adapter->hadev[svc_type]) { + hinic3_del_one_adev(hwdev, svc_type); + pci_adapter->hadev[svc_type] = NULL; + } + } + mutex_unlock(&pci_adapter->pdev_mutex); + return -ENOMEM; +} + +static void hinic3_detach_aux_devices(struct hinic3_hwdev *hwdev) +{ + struct hinic3_pcidev *pci_adapter = hwdev->adapter; + int i; + + mutex_lock(&pci_adapter->pdev_mutex); + for (i = 0; i < ARRAY_SIZE(hinic3_adev_devices); i++) { + if (pci_adapter->hadev[i]) + hinic3_del_one_adev(hwdev, i); + } + 
mutex_unlock(&pci_adapter->pdev_mutex); +} + +struct hinic3_hwdev *hinic3_adev_get_hwdev(struct auxiliary_device *adev) +{ + struct hinic3_adev *hadev; + + hadev = container_of(adev, struct hinic3_adev, adev); + return hadev->hwdev; +} + +void hinic3_adev_event_register(struct auxiliary_device *adev, + void (*event_handler)(struct auxiliary_device *adev, + struct hinic3_event_info *event)) +{ + struct hinic3_adev *hadev; + + hadev = container_of(adev, struct hinic3_adev, adev); + hadev->event = event_handler; +} + +void hinic3_adev_event_unregister(struct auxiliary_device *adev) +{ + struct hinic3_adev *hadev; + + hadev = container_of(adev, struct hinic3_adev, adev); + hadev->event = NULL; +} + +static int hinic3_mapping_bar(struct pci_dev *pdev, + struct hinic3_pcidev *pci_adapter) +{ + pci_adapter->cfg_reg_base = pci_ioremap_bar(pdev, + HINIC3_VF_PCI_CFG_REG_BAR); + if (!pci_adapter->cfg_reg_base) { + dev_err(&pdev->dev, "Failed to map configuration regs\n"); + return -ENOMEM; + } + + pci_adapter->intr_reg_base = pci_ioremap_bar(pdev, + HINIC3_PCI_INTR_REG_BAR); + if (!pci_adapter->intr_reg_base) { + dev_err(&pdev->dev, "Failed to map interrupt regs\n"); + goto err_unmap_cfg_reg_base; + } + + pci_adapter->db_base_phy = pci_resource_start(pdev, HINIC3_PCI_DB_BAR); + pci_adapter->db_dwqe_len = pci_resource_len(pdev, HINIC3_PCI_DB_BAR); + pci_adapter->db_base = pci_ioremap_bar(pdev, HINIC3_PCI_DB_BAR); + if (!pci_adapter->db_base) { + dev_err(&pdev->dev, "Failed to map doorbell regs\n"); + goto err_unmap_intr_reg_base; + } + + return 0; + +err_unmap_intr_reg_base: + iounmap(pci_adapter->intr_reg_base); + +err_unmap_cfg_reg_base: + iounmap(pci_adapter->cfg_reg_base); + + return -ENOMEM; +} + +static void hinic3_unmapping_bar(struct hinic3_pcidev *pci_adapter) +{ + iounmap(pci_adapter->db_base); + iounmap(pci_adapter->intr_reg_base); + iounmap(pci_adapter->cfg_reg_base); +} + +static int hinic3_pci_init(struct pci_dev *pdev) +{ + struct hinic3_pcidev *pci_adapter; + int err; + + pci_adapter = kzalloc(sizeof(*pci_adapter), GFP_KERNEL); + if (!pci_adapter) + return -ENOMEM; + + pci_adapter->pdev = pdev; + mutex_init(&pci_adapter->pdev_mutex); + + pci_set_drvdata(pdev, pci_adapter); + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "Failed to enable PCI device\n"); + goto err_free_pci_adapter; + } + + err = pci_request_regions(pdev, HINIC3_NIC_DRV_NAME); + if (err) { + dev_err(&pdev->dev, "Failed to request regions\n"); + goto err_disable_device; + } + + pci_set_master(pdev); + + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) { + dev_err(&pdev->dev, "Failed to set DMA mask\n"); + goto err_release_regions; + } + + return 0; + +err_release_regions: + pci_clear_master(pdev); + pci_release_regions(pdev); + +err_disable_device: + pci_disable_device(pdev); + +err_free_pci_adapter: + pci_set_drvdata(pdev, NULL); + mutex_destroy(&pci_adapter->pdev_mutex); + kfree(pci_adapter); + + return err; +} + +static void hinic3_pci_uninit(struct pci_dev *pdev) +{ + struct hinic3_pcidev *pci_adapter = pci_get_drvdata(pdev); + + pci_clear_master(pdev); + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + mutex_destroy(&pci_adapter->pdev_mutex); + kfree(pci_adapter); +} + +static int hinic3_func_init(struct pci_dev *pdev, + struct hinic3_pcidev *pci_adapter) +{ + int err; + + err = hinic3_init_hwdev(pdev); + if (err) { + dev_err(&pdev->dev, "Failed to initialize hardware device\n"); + return err; + } + + err = 
hinic3_attach_aux_devices(pci_adapter->hwdev); + if (err) + goto err_free_hwdev; + + return 0; + +err_free_hwdev: + hinic3_free_hwdev(pci_adapter->hwdev); + + return err; +} + +static void hinic3_func_uninit(struct pci_dev *pdev) +{ + struct hinic3_pcidev *pci_adapter = pci_get_drvdata(pdev); + + hinic3_detach_aux_devices(pci_adapter->hwdev); + hinic3_free_hwdev(pci_adapter->hwdev); +} + +static int hinic3_probe_func(struct hinic3_pcidev *pci_adapter) +{ + struct pci_dev *pdev = pci_adapter->pdev; + int err; + + err = hinic3_mapping_bar(pdev, pci_adapter); + if (err) { + dev_err(&pdev->dev, "Failed to map bar\n"); + goto err_out; + } + + err = hinic3_func_init(pdev, pci_adapter); + if (err) + goto err_unmap_bar; + + return 0; + +err_unmap_bar: + hinic3_unmapping_bar(pci_adapter); + +err_out: + dev_err(&pdev->dev, "PCIe device probe function failed\n"); + return err; +} + +static void hinic3_remove_func(struct hinic3_pcidev *pci_adapter) +{ + struct pci_dev *pdev = pci_adapter->pdev; + + hinic3_func_uninit(pdev); + hinic3_unmapping_bar(pci_adapter); +} + +static int hinic3_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct hinic3_pcidev *pci_adapter; + int err; + + err = hinic3_pci_init(pdev); + if (err) + goto err_out; + + pci_adapter = pci_get_drvdata(pdev); + err = hinic3_probe_func(pci_adapter); + if (err) + goto err_uninit_pci; + + return 0; + +err_uninit_pci: + hinic3_pci_uninit(pdev); + +err_out: + dev_err(&pdev->dev, "PCIe device probe failed\n"); + return err; +} + +static void hinic3_remove(struct pci_dev *pdev) +{ + struct hinic3_pcidev *pci_adapter = pci_get_drvdata(pdev); + + hinic3_remove_func(pci_adapter); + hinic3_pci_uninit(pdev); +} + +static const struct pci_device_id hinic3_pci_table[] = { + /* Completed by later submission due to LoC limit. */ + {0, 0} + +}; + +MODULE_DEVICE_TABLE(pci, hinic3_pci_table); + +static void hinic3_shutdown(struct pci_dev *pdev) +{ + struct hinic3_pcidev *pci_adapter = pci_get_drvdata(pdev); + + pci_disable_device(pdev); + + if (pci_adapter) + hinic3_set_api_stop(pci_adapter->hwdev); +} + +static struct pci_driver hinic3_driver = { + .name = HINIC3_NIC_DRV_NAME, + .id_table = hinic3_pci_table, + .probe = hinic3_probe, + .remove = hinic3_remove, + .shutdown = hinic3_shutdown, + .sriov_configure = pci_sriov_configure_simple +}; + +int hinic3_lld_init(void) +{ + return pci_register_driver(&hinic3_driver); +} + +void hinic3_lld_exit(void) +{ + pci_unregister_driver(&hinic3_driver); +} diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_lld.h b/drivers/net/ethernet/huawei/hinic3/hinic3_lld.h new file mode 100644 index 000000000000..322b44803476 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_lld.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. 
*/ + +#ifndef _HINIC3_LLD_H_ +#define _HINIC3_LLD_H_ + +#include <linux/auxiliary_bus.h> + +struct hinic3_event_info; + +#define HINIC3_NIC_DRV_NAME "hinic3" + +int hinic3_lld_init(void); +void hinic3_lld_exit(void); +void hinic3_adev_event_register(struct auxiliary_device *adev, + void (*event_handler)(struct auxiliary_device *adev, + struct hinic3_event_info *event)); +void hinic3_adev_event_unregister(struct auxiliary_device *adev); +struct hinic3_hwdev *hinic3_adev_get_hwdev(struct auxiliary_device *adev); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_main.c b/drivers/net/ethernet/huawei/hinic3/hinic3_main.c new file mode 100644 index 000000000000..093aa6d775ff --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_main.c @@ -0,0 +1,354 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. + +#include <linux/etherdevice.h> +#include <linux/netdevice.h> + +#include "hinic3_common.h" +#include "hinic3_hw_comm.h" +#include "hinic3_hwdev.h" +#include "hinic3_hwif.h" +#include "hinic3_lld.h" +#include "hinic3_nic_cfg.h" +#include "hinic3_nic_dev.h" +#include "hinic3_nic_io.h" +#include "hinic3_rx.h" +#include "hinic3_tx.h" + +#define HINIC3_NIC_DRV_DESC "Intelligent Network Interface Card Driver" + +#define HINIC3_RX_BUF_LEN 2048 +#define HINIC3_LRO_REPLENISH_THLD 256 +#define HINIC3_NIC_DEV_WQ_NAME "hinic3_nic_dev_wq" + +#define HINIC3_SQ_DEPTH 1024 +#define HINIC3_RQ_DEPTH 1024 + +static int hinic3_alloc_txrxqs(struct net_device *netdev) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic3_hwdev *hwdev = nic_dev->hwdev; + int err; + + err = hinic3_alloc_txqs(netdev); + if (err) { + dev_err(hwdev->dev, "Failed to alloc txqs\n"); + return err; + } + + err = hinic3_alloc_rxqs(netdev); + if (err) { + dev_err(hwdev->dev, "Failed to alloc rxqs\n"); + goto err_free_txqs; + } + + return 0; + +err_free_txqs: + hinic3_free_txqs(netdev); + + return err; +} + +static void hinic3_free_txrxqs(struct net_device *netdev) +{ + hinic3_free_rxqs(netdev); + hinic3_free_txqs(netdev); +} + +static int hinic3_init_nic_dev(struct net_device *netdev, + struct hinic3_hwdev *hwdev) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + struct pci_dev *pdev = hwdev->pdev; + + nic_dev->netdev = netdev; + SET_NETDEV_DEV(netdev, &pdev->dev); + nic_dev->hwdev = hwdev; + nic_dev->pdev = pdev; + + nic_dev->rx_buf_len = HINIC3_RX_BUF_LEN; + nic_dev->lro_replenish_thld = HINIC3_LRO_REPLENISH_THLD; + nic_dev->nic_svc_cap = hwdev->cfg_mgmt->cap.nic_svc_cap; + + return 0; +} + +static int hinic3_sw_init(struct net_device *netdev) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic3_hwdev *hwdev = nic_dev->hwdev; + int err; + + nic_dev->q_params.sq_depth = HINIC3_SQ_DEPTH; + nic_dev->q_params.rq_depth = HINIC3_RQ_DEPTH; + + /* VF driver always uses random MAC address. During VM migration to a + * new device, the new device should learn the VMs old MAC rather than + * provide its own MAC. The product design assumes that every VF is + * suspectable to migration so the device avoids offering MAC address + * to VFs. 
+ */ + eth_hw_addr_random(netdev); + err = hinic3_set_mac(hwdev, netdev->dev_addr, 0, + hinic3_global_func_id(hwdev)); + if (err) { + dev_err(hwdev->dev, "Failed to set default MAC\n"); + return err; + } + + err = hinic3_alloc_txrxqs(netdev); + if (err) { + dev_err(hwdev->dev, "Failed to alloc qps\n"); + goto err_del_mac; + } + + return 0; + +err_del_mac: + hinic3_del_mac(hwdev, netdev->dev_addr, 0, + hinic3_global_func_id(hwdev)); + + return err; +} + +static void hinic3_sw_uninit(struct net_device *netdev) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + + hinic3_free_txrxqs(netdev); + hinic3_del_mac(nic_dev->hwdev, netdev->dev_addr, 0, + hinic3_global_func_id(nic_dev->hwdev)); +} + +static void hinic3_assign_netdev_ops(struct net_device *netdev) +{ + hinic3_set_netdev_ops(netdev); +} + +static void netdev_feature_init(struct net_device *netdev) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + netdev_features_t cso_fts = 0; + netdev_features_t tso_fts = 0; + netdev_features_t dft_fts; + + dft_fts = NETIF_F_SG | NETIF_F_HIGHDMA; + if (hinic3_test_support(nic_dev, HINIC3_NIC_F_CSUM)) + cso_fts |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM; + if (hinic3_test_support(nic_dev, HINIC3_NIC_F_SCTP_CRC)) + cso_fts |= NETIF_F_SCTP_CRC; + if (hinic3_test_support(nic_dev, HINIC3_NIC_F_TSO)) + tso_fts |= NETIF_F_TSO | NETIF_F_TSO6; + + netdev->features |= dft_fts | cso_fts | tso_fts; +} + +static int hinic3_set_default_hw_feature(struct net_device *netdev) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic3_hwdev *hwdev = nic_dev->hwdev; + int err; + + err = hinic3_set_nic_feature_to_hw(nic_dev); + if (err) { + dev_err(hwdev->dev, "Failed to set nic features\n"); + return err; + } + + return 0; +} + +static void hinic3_link_status_change(struct net_device *netdev, + bool link_status_up) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + + if (link_status_up) { + if (netif_carrier_ok(netdev)) + return; + + nic_dev->link_status_up = true; + netif_carrier_on(netdev); + netdev_dbg(netdev, "Link is up\n"); + } else { + if (!netif_carrier_ok(netdev)) + return; + + nic_dev->link_status_up = false; + netif_carrier_off(netdev); + netdev_dbg(netdev, "Link is down\n"); + } +} + +static void hinic3_nic_event(struct auxiliary_device *adev, + struct hinic3_event_info *event) +{ + struct hinic3_nic_dev *nic_dev = dev_get_drvdata(&adev->dev); + struct net_device *netdev; + + netdev = nic_dev->netdev; + + switch (HINIC3_SRV_EVENT_TYPE(event->service, event->type)) { + case HINIC3_SRV_EVENT_TYPE(HINIC3_EVENT_SRV_NIC, + HINIC3_NIC_EVENT_LINK_UP): + hinic3_link_status_change(netdev, true); + break; + case HINIC3_SRV_EVENT_TYPE(HINIC3_EVENT_SRV_NIC, + HINIC3_NIC_EVENT_LINK_DOWN): + hinic3_link_status_change(netdev, false); + break; + default: + break; + } +} + +static int hinic3_nic_probe(struct auxiliary_device *adev, + const struct auxiliary_device_id *id) +{ + struct hinic3_hwdev *hwdev = hinic3_adev_get_hwdev(adev); + struct pci_dev *pdev = hwdev->pdev; + struct hinic3_nic_dev *nic_dev; + struct net_device *netdev; + u16 max_qps, glb_func_id; + int err; + + if (!hinic3_support_nic(hwdev)) { + dev_dbg(&adev->dev, "HW doesn't support nic\n"); + return 0; + } + + hinic3_adev_event_register(adev, hinic3_nic_event); + + glb_func_id = hinic3_global_func_id(hwdev); + err = hinic3_func_reset(hwdev, glb_func_id, COMM_FUNC_RESET_BIT_NIC); + if (err) { + dev_err(&adev->dev, "Failed to reset function\n"); + goto err_unregister_adev_event; + } + + max_qps = 
hinic3_func_max_qnum(hwdev); + netdev = alloc_etherdev_mq(sizeof(*nic_dev), max_qps); + if (!netdev) { + dev_err(&adev->dev, "Failed to allocate netdev\n"); + err = -ENOMEM; + goto err_unregister_adev_event; + } + + nic_dev = netdev_priv(netdev); + dev_set_drvdata(&adev->dev, nic_dev); + err = hinic3_init_nic_dev(netdev, hwdev); + if (err) + goto err_free_netdev; + + err = hinic3_init_nic_io(nic_dev); + if (err) + goto err_free_netdev; + + err = hinic3_sw_init(netdev); + if (err) + goto err_free_nic_io; + + hinic3_assign_netdev_ops(netdev); + + netdev_feature_init(netdev); + err = hinic3_set_default_hw_feature(netdev); + if (err) + goto err_uninit_sw; + + netif_carrier_off(netdev); + + err = register_netdev(netdev); + if (err) + goto err_uninit_nic_feature; + + return 0; + +err_uninit_nic_feature: + hinic3_update_nic_feature(nic_dev, 0); + hinic3_set_nic_feature_to_hw(nic_dev); + +err_uninit_sw: + hinic3_sw_uninit(netdev); + +err_free_nic_io: + hinic3_free_nic_io(nic_dev); + +err_free_netdev: + free_netdev(netdev); + +err_unregister_adev_event: + hinic3_adev_event_unregister(adev); + dev_err(&pdev->dev, "NIC service probe failed\n"); + + return err; +} + +static void hinic3_nic_remove(struct auxiliary_device *adev) +{ + struct hinic3_nic_dev *nic_dev = dev_get_drvdata(&adev->dev); + struct net_device *netdev; + + if (!hinic3_support_nic(nic_dev->hwdev)) + return; + + netdev = nic_dev->netdev; + unregister_netdev(netdev); + + hinic3_update_nic_feature(nic_dev, 0); + hinic3_set_nic_feature_to_hw(nic_dev); + hinic3_sw_uninit(netdev); + + hinic3_free_nic_io(nic_dev); + + free_netdev(netdev); +} + +static const struct auxiliary_device_id hinic3_nic_id_table[] = { + { + .name = HINIC3_NIC_DRV_NAME ".nic", + }, + {} +}; + +static struct auxiliary_driver hinic3_nic_driver = { + .probe = hinic3_nic_probe, + .remove = hinic3_nic_remove, + .suspend = NULL, + .resume = NULL, + .name = "nic", + .id_table = hinic3_nic_id_table, +}; + +static __init int hinic3_nic_lld_init(void) +{ + int err; + + pr_info("%s: %s\n", HINIC3_NIC_DRV_NAME, HINIC3_NIC_DRV_DESC); + + err = hinic3_lld_init(); + if (err) + return err; + + err = auxiliary_driver_register(&hinic3_nic_driver); + if (err) { + hinic3_lld_exit(); + return err; + } + + return 0; +} + +static __exit void hinic3_nic_lld_exit(void) +{ + auxiliary_driver_unregister(&hinic3_nic_driver); + + hinic3_lld_exit(); +} + +module_init(hinic3_nic_lld_init); +module_exit(hinic3_nic_lld_exit); + +MODULE_AUTHOR("Huawei Technologies CO., Ltd"); +MODULE_DESCRIPTION(HINIC3_NIC_DRV_DESC); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c b/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c new file mode 100644 index 000000000000..e74d1eb09730 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. + +#include <linux/dma-mapping.h> + +#include "hinic3_common.h" +#include "hinic3_hwdev.h" +#include "hinic3_hwif.h" +#include "hinic3_mbox.h" + +int hinic3_send_mbox_to_mgmt(struct hinic3_hwdev *hwdev, u8 mod, u16 cmd, + const struct mgmt_msg_params *msg_params) +{ + /* Completed by later submission due to LoC limit. 
*/ + return -EFAULT; +} diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h b/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h new file mode 100644 index 000000000000..d7a6c37b7eff --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */ + +#ifndef _HINIC3_MBOX_H_ +#define _HINIC3_MBOX_H_ + +#include <linux/bitfield.h> +#include <linux/mutex.h> + +struct hinic3_hwdev; + +int hinic3_send_mbox_to_mgmt(struct hinic3_hwdev *hwdev, u8 mod, u16 cmd, + const struct mgmt_msg_params *msg_params); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h new file mode 100644 index 000000000000..4edabeb32112 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */ + +#ifndef _HINIC3_MGMT_H_ +#define _HINIC3_MGMT_H_ + +#include <linux/types.h> + +struct hinic3_hwdev; + +void hinic3_flush_mgmt_workq(struct hinic3_hwdev *hwdev); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h new file mode 100644 index 000000000000..c4434efdc7f7 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h @@ -0,0 +1,105 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */ + +#ifndef _HINIC3_MGMT_INTERFACE_H_ +#define _HINIC3_MGMT_INTERFACE_H_ + +#include <linux/bitfield.h> +#include <linux/bits.h> +#include <linux/if_ether.h> + +#include "hinic3_hw_intf.h" + +struct l2nic_cmd_feature_nego { + struct mgmt_msg_head msg_head; + u16 func_id; + u8 opcode; + u8 rsvd; + u64 s_feature[4]; +}; + +enum l2nic_func_tbl_cfg_bitmap { + L2NIC_FUNC_TBL_CFG_INIT = 0, + L2NIC_FUNC_TBL_CFG_RX_BUF_SIZE = 1, + L2NIC_FUNC_TBL_CFG_MTU = 2, +}; + +struct l2nic_func_tbl_cfg { + u16 rx_wqe_buf_size; + u16 mtu; + u32 rsvd[9]; +}; + +struct l2nic_cmd_set_func_tbl { + struct mgmt_msg_head msg_head; + u16 func_id; + u16 rsvd; + u32 cfg_bitmap; + struct l2nic_func_tbl_cfg tbl_cfg; +}; + +struct l2nic_cmd_set_mac { + struct mgmt_msg_head msg_head; + u16 func_id; + u16 vlan_id; + u16 rsvd1; + u8 mac[ETH_ALEN]; +}; + +struct l2nic_cmd_update_mac { + struct mgmt_msg_head msg_head; + u16 func_id; + u16 vlan_id; + u16 rsvd1; + u8 old_mac[ETH_ALEN]; + u16 rsvd2; + u8 new_mac[ETH_ALEN]; +}; + +struct l2nic_cmd_force_pkt_drop { + struct mgmt_msg_head msg_head; + u8 port; + u8 rsvd1[3]; +}; + +/* Commands between NIC to fw */ +enum l2nic_cmd { + /* FUNC CFG */ + L2NIC_CMD_SET_FUNC_TBL = 5, + L2NIC_CMD_SET_VPORT_ENABLE = 6, + L2NIC_CMD_SET_SQ_CI_ATTR = 8, + L2NIC_CMD_CLEAR_QP_RESOURCE = 11, + L2NIC_CMD_FEATURE_NEGO = 15, + L2NIC_CMD_SET_MAC = 21, + L2NIC_CMD_DEL_MAC = 22, + L2NIC_CMD_UPDATE_MAC = 23, + L2NIC_CMD_CFG_RSS = 60, + L2NIC_CMD_CFG_RSS_HASH_KEY = 63, + L2NIC_CMD_CFG_RSS_HASH_ENGINE = 64, + L2NIC_CMD_SET_RSS_CTX_TBL = 65, + L2NIC_CMD_QOS_DCB_STATE = 110, + L2NIC_CMD_FORCE_PKT_DROP = 113, + L2NIC_CMD_MAX = 256, +}; + +enum hinic3_nic_feature_cap { + HINIC3_NIC_F_CSUM = BIT(0), + HINIC3_NIC_F_SCTP_CRC = BIT(1), + HINIC3_NIC_F_TSO = BIT(2), + HINIC3_NIC_F_LRO = BIT(3), + HINIC3_NIC_F_UFO = BIT(4), + HINIC3_NIC_F_RSS = BIT(5), + HINIC3_NIC_F_RX_VLAN_FILTER = BIT(6), + HINIC3_NIC_F_RX_VLAN_STRIP = BIT(7), + 
HINIC3_NIC_F_TX_VLAN_INSERT = BIT(8), + HINIC3_NIC_F_VXLAN_OFFLOAD = BIT(9), + HINIC3_NIC_F_FDIR = BIT(11), + HINIC3_NIC_F_PROMISC = BIT(12), + HINIC3_NIC_F_ALLMULTI = BIT(13), + HINIC3_NIC_F_RATE_LIMIT = BIT(16), +}; + +#define HINIC3_NIC_F_ALL_MASK 0x33bff +#define HINIC3_NIC_DRV_DEFAULT_FEATURE 0x3f03f + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c b/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c new file mode 100644 index 000000000000..71104a6b8bef --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. + +#include <linux/etherdevice.h> +#include <linux/netdevice.h> + +#include "hinic3_hwif.h" +#include "hinic3_nic_cfg.h" +#include "hinic3_nic_dev.h" +#include "hinic3_nic_io.h" +#include "hinic3_rx.h" +#include "hinic3_tx.h" + +static int hinic3_open(struct net_device *netdev) +{ + /* Completed by later submission due to LoC limit. */ + return -EFAULT; +} + +static int hinic3_close(struct net_device *netdev) +{ + /* Completed by later submission due to LoC limit. */ + return -EFAULT; +} + +static int hinic3_change_mtu(struct net_device *netdev, int new_mtu) +{ + int err; + + err = hinic3_set_port_mtu(netdev, new_mtu); + if (err) { + netdev_err(netdev, "Failed to change port mtu to %d\n", + new_mtu); + return err; + } + + netdev_dbg(netdev, "Change mtu from %u to %d\n", netdev->mtu, new_mtu); + WRITE_ONCE(netdev->mtu, new_mtu); + + return 0; +} + +static int hinic3_set_mac_addr(struct net_device *netdev, void *addr) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + struct sockaddr *saddr = addr; + int err; + + if (!is_valid_ether_addr(saddr->sa_data)) + return -EADDRNOTAVAIL; + + if (ether_addr_equal(netdev->dev_addr, saddr->sa_data)) + return 0; + + err = hinic3_update_mac(nic_dev->hwdev, netdev->dev_addr, + saddr->sa_data, 0, + hinic3_global_func_id(nic_dev->hwdev)); + + if (err) + return err; + + eth_hw_addr_set(netdev, saddr->sa_data); + + return 0; +} + +static const struct net_device_ops hinic3_netdev_ops = { + .ndo_open = hinic3_open, + .ndo_stop = hinic3_close, + .ndo_change_mtu = hinic3_change_mtu, + .ndo_set_mac_address = hinic3_set_mac_addr, + .ndo_start_xmit = hinic3_xmit_frame, +}; + +void hinic3_set_netdev_ops(struct net_device *netdev) +{ + netdev->netdev_ops = &hinic3_netdev_ops; +} diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c new file mode 100644 index 000000000000..5b1a91a18c67 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c @@ -0,0 +1,233 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. 
+ +#include <linux/if_vlan.h> + +#include "hinic3_hwdev.h" +#include "hinic3_hwif.h" +#include "hinic3_mbox.h" +#include "hinic3_nic_cfg.h" +#include "hinic3_nic_dev.h" +#include "hinic3_nic_io.h" + +static int hinic3_feature_nego(struct hinic3_hwdev *hwdev, u8 opcode, + u64 *s_feature, u16 size) +{ + struct l2nic_cmd_feature_nego feature_nego = {}; + struct mgmt_msg_params msg_params = {}; + int err; + + feature_nego.func_id = hinic3_global_func_id(hwdev); + feature_nego.opcode = opcode; + if (opcode == MGMT_MSG_CMD_OP_SET) + memcpy(feature_nego.s_feature, s_feature, size * sizeof(u64)); + + mgmt_msg_params_init_default(&msg_params, &feature_nego, + sizeof(feature_nego)); + + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC, + L2NIC_CMD_FEATURE_NEGO, &msg_params); + if (err || feature_nego.msg_head.status) { + dev_err(hwdev->dev, "Failed to negotiate nic feature, err:%d, status: 0x%x\n", + err, feature_nego.msg_head.status); + return -EIO; + } + + if (opcode == MGMT_MSG_CMD_OP_GET) + memcpy(s_feature, feature_nego.s_feature, size * sizeof(u64)); + + return 0; +} + +int hinic3_set_nic_feature_to_hw(struct hinic3_nic_dev *nic_dev) +{ + return hinic3_feature_nego(nic_dev->hwdev, MGMT_MSG_CMD_OP_SET, + &nic_dev->nic_io->feature_cap, 1); +} + +bool hinic3_test_support(struct hinic3_nic_dev *nic_dev, + enum hinic3_nic_feature_cap feature_bits) +{ + return (nic_dev->nic_io->feature_cap & feature_bits) == feature_bits; +} + +void hinic3_update_nic_feature(struct hinic3_nic_dev *nic_dev, u64 feature_cap) +{ + nic_dev->nic_io->feature_cap = feature_cap; +} + +static int hinic3_set_function_table(struct hinic3_hwdev *hwdev, u32 cfg_bitmap, + const struct l2nic_func_tbl_cfg *cfg) +{ + struct l2nic_cmd_set_func_tbl cmd_func_tbl = {}; + struct mgmt_msg_params msg_params = {}; + int err; + + cmd_func_tbl.func_id = hinic3_global_func_id(hwdev); + cmd_func_tbl.cfg_bitmap = cfg_bitmap; + cmd_func_tbl.tbl_cfg = *cfg; + + mgmt_msg_params_init_default(&msg_params, &cmd_func_tbl, + sizeof(cmd_func_tbl)); + + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC, + L2NIC_CMD_SET_FUNC_TBL, &msg_params); + if (err || cmd_func_tbl.msg_head.status) { + dev_err(hwdev->dev, + "Failed to set func table, bitmap: 0x%x, err: %d, status: 0x%x\n", + cfg_bitmap, err, cmd_func_tbl.msg_head.status); + return -EFAULT; + } + + return 0; +} + +int hinic3_set_port_mtu(struct net_device *netdev, u16 new_mtu) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + struct l2nic_func_tbl_cfg func_tbl_cfg = {}; + struct hinic3_hwdev *hwdev = nic_dev->hwdev; + + func_tbl_cfg.mtu = new_mtu; + return hinic3_set_function_table(hwdev, BIT(L2NIC_FUNC_TBL_CFG_MTU), + &func_tbl_cfg); +} + +static int hinic3_check_mac_info(struct hinic3_hwdev *hwdev, u8 status, + u16 vlan_id) +{ + if ((status && status != MGMT_STATUS_EXIST) || + ((vlan_id & BIT(15)) && status == MGMT_STATUS_EXIST)) { + return -EINVAL; + } + + return 0; +} + +int hinic3_set_mac(struct hinic3_hwdev *hwdev, const u8 *mac_addr, u16 vlan_id, + u16 func_id) +{ + struct l2nic_cmd_set_mac mac_info = {}; + struct mgmt_msg_params msg_params = {}; + int err; + + if ((vlan_id & HINIC3_VLAN_ID_MASK) >= VLAN_N_VID) { + dev_err(hwdev->dev, "Invalid VLAN number: %d\n", + (vlan_id & HINIC3_VLAN_ID_MASK)); + return -EINVAL; + } + + mac_info.func_id = func_id; + mac_info.vlan_id = vlan_id; + ether_addr_copy(mac_info.mac, mac_addr); + + mgmt_msg_params_init_default(&msg_params, &mac_info, sizeof(mac_info)); + + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC, + L2NIC_CMD_SET_MAC, 
&msg_params); + if (err || hinic3_check_mac_info(hwdev, mac_info.msg_head.status, + mac_info.vlan_id)) { + dev_err(hwdev->dev, + "Failed to update MAC, err: %d, status: 0x%x\n", + err, mac_info.msg_head.status); + return -EIO; + } + + if (mac_info.msg_head.status == MGMT_STATUS_PF_SET_VF_ALREADY) { + dev_warn(hwdev->dev, "PF has already set VF mac, Ignore set operation\n"); + return 0; + } + + if (mac_info.msg_head.status == MGMT_STATUS_EXIST) { + dev_warn(hwdev->dev, "MAC is repeated. Ignore update operation\n"); + return 0; + } + + return 0; +} + +int hinic3_del_mac(struct hinic3_hwdev *hwdev, const u8 *mac_addr, u16 vlan_id, + u16 func_id) +{ + struct l2nic_cmd_set_mac mac_info = {}; + struct mgmt_msg_params msg_params = {}; + int err; + + if ((vlan_id & HINIC3_VLAN_ID_MASK) >= VLAN_N_VID) { + dev_err(hwdev->dev, "Invalid VLAN number: %d\n", + (vlan_id & HINIC3_VLAN_ID_MASK)); + return -EINVAL; + } + + mac_info.func_id = func_id; + mac_info.vlan_id = vlan_id; + ether_addr_copy(mac_info.mac, mac_addr); + + mgmt_msg_params_init_default(&msg_params, &mac_info, sizeof(mac_info)); + + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC, + L2NIC_CMD_DEL_MAC, &msg_params); + if (err) { + dev_err(hwdev->dev, + "Failed to delete MAC, err: %d, status: 0x%x\n", + err, mac_info.msg_head.status); + return err; + } + + return 0; +} + +int hinic3_update_mac(struct hinic3_hwdev *hwdev, const u8 *old_mac, + u8 *new_mac, u16 vlan_id, u16 func_id) +{ + struct l2nic_cmd_update_mac mac_info = {}; + struct mgmt_msg_params msg_params = {}; + int err; + + if ((vlan_id & HINIC3_VLAN_ID_MASK) >= VLAN_N_VID) { + dev_err(hwdev->dev, "Invalid VLAN number: %d\n", + (vlan_id & HINIC3_VLAN_ID_MASK)); + return -EINVAL; + } + + mac_info.func_id = func_id; + mac_info.vlan_id = vlan_id; + ether_addr_copy(mac_info.old_mac, old_mac); + ether_addr_copy(mac_info.new_mac, new_mac); + + mgmt_msg_params_init_default(&msg_params, &mac_info, sizeof(mac_info)); + + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC, + L2NIC_CMD_UPDATE_MAC, &msg_params); + if (err || hinic3_check_mac_info(hwdev, mac_info.msg_head.status, + mac_info.vlan_id)) { + dev_err(hwdev->dev, + "Failed to update MAC, err: %d, status: 0x%x\n", + err, mac_info.msg_head.status); + return -EIO; + } + return 0; +} + +int hinic3_force_drop_tx_pkt(struct hinic3_hwdev *hwdev) +{ + struct l2nic_cmd_force_pkt_drop pkt_drop = {}; + struct mgmt_msg_params msg_params = {}; + int err; + + pkt_drop.port = hinic3_physical_port_id(hwdev); + + mgmt_msg_params_init_default(&msg_params, &pkt_drop, sizeof(pkt_drop)); + + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC, + L2NIC_CMD_FORCE_PKT_DROP, &msg_params); + if ((pkt_drop.msg_head.status != MGMT_STATUS_CMD_UNSUPPORTED && + pkt_drop.msg_head.status) || err) { + dev_err(hwdev->dev, + "Failed to set force tx packets drop, err: %d, status: 0x%x\n", + err, pkt_drop.msg_head.status); + return -EFAULT; + } + + return pkt_drop.msg_head.status; +} diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h new file mode 100644 index 000000000000..bf9ce51dc401 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. 
*/ + +#ifndef _HINIC3_NIC_CFG_H_ +#define _HINIC3_NIC_CFG_H_ + +#include <linux/types.h> + +#include "hinic3_hw_intf.h" +#include "hinic3_mgmt_interface.h" + +struct hinic3_hwdev; +struct hinic3_nic_dev; + +#define HINIC3_MIN_MTU_SIZE 256 +#define HINIC3_MAX_JUMBO_FRAME_SIZE 9600 + +#define HINIC3_VLAN_ID_MASK 0x7FFF + +enum hinic3_nic_event_type { + HINIC3_NIC_EVENT_LINK_DOWN = 0, + HINIC3_NIC_EVENT_LINK_UP = 1, +}; + +int hinic3_set_nic_feature_to_hw(struct hinic3_nic_dev *nic_dev); +bool hinic3_test_support(struct hinic3_nic_dev *nic_dev, + enum hinic3_nic_feature_cap feature_bits); +void hinic3_update_nic_feature(struct hinic3_nic_dev *nic_dev, u64 feature_cap); + +int hinic3_set_port_mtu(struct net_device *netdev, u16 new_mtu); + +int hinic3_set_mac(struct hinic3_hwdev *hwdev, const u8 *mac_addr, u16 vlan_id, + u16 func_id); +int hinic3_del_mac(struct hinic3_hwdev *hwdev, const u8 *mac_addr, u16 vlan_id, + u16 func_id); +int hinic3_update_mac(struct hinic3_hwdev *hwdev, const u8 *old_mac, + u8 *new_mac, u16 vlan_id, u16 func_id); + +int hinic3_force_drop_tx_pkt(struct hinic3_hwdev *hwdev); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h new file mode 100644 index 000000000000..c994fc9b6ee0 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */ + +#ifndef _HINIC3_NIC_DEV_H_ +#define _HINIC3_NIC_DEV_H_ + +#include <linux/netdevice.h> + +#include "hinic3_hw_cfg.h" +#include "hinic3_mgmt_interface.h" + +enum hinic3_flags { + HINIC3_RSS_ENABLE, +}; + +enum hinic3_rss_hash_type { + HINIC3_RSS_HASH_ENGINE_TYPE_XOR = 0, + HINIC3_RSS_HASH_ENGINE_TYPE_TOEP = 1, +}; + +struct hinic3_rss_type { + u8 tcp_ipv6_ext; + u8 ipv6_ext; + u8 tcp_ipv6; + u8 ipv6; + u8 tcp_ipv4; + u8 ipv4; + u8 udp_ipv6; + u8 udp_ipv4; +}; + +struct hinic3_irq_cfg { + struct net_device *netdev; + u16 msix_entry_idx; + /* provided by OS */ + u32 irq_id; + char irq_name[IFNAMSIZ + 16]; + struct napi_struct napi; + cpumask_t affinity_mask; + struct hinic3_txq *txq; + struct hinic3_rxq *rxq; +}; + +struct hinic3_dyna_txrxq_params { + u16 num_qps; + u32 sq_depth; + u32 rq_depth; + + struct hinic3_dyna_txq_res *txqs_res; + struct hinic3_dyna_rxq_res *rxqs_res; + struct hinic3_irq_cfg *irq_cfg; +}; + +struct hinic3_nic_dev { + struct pci_dev *pdev; + struct net_device *netdev; + struct hinic3_hwdev *hwdev; + struct hinic3_nic_io *nic_io; + + u16 max_qps; + u16 rx_buf_len; + u32 lro_replenish_thld; + unsigned long flags; + struct hinic3_nic_service_cap nic_svc_cap; + + struct hinic3_dyna_txrxq_params q_params; + struct hinic3_txq *txqs; + struct hinic3_rxq *rxqs; + + u16 num_qp_irq; + struct msix_entry *qps_msix_entries; + + bool link_status_up; +}; + +void hinic3_set_netdev_ops(struct net_device *netdev); + +/* Temporary prototypes. Functions become static in later submission. */ +void qp_add_napi(struct hinic3_irq_cfg *irq_cfg); +void qp_del_napi(struct hinic3_irq_cfg *irq_cfg); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c new file mode 100644 index 000000000000..34a1f5bd5ac1 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. 
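The configuration helpers above (hinic3_set_mac(), hinic3_update_mac(), hinic3_set_port_mtu(), hinic3_force_drop_tx_pkt(), ...) all follow the same request/response pattern over the management mailbox: fill a command struct that starts with struct mgmt_msg_head, wrap it with mgmt_msg_params_init_default(), send it with hinic3_send_mbox_to_mgmt(), and treat a non-zero return code or a non-zero head.status as failure. A minimal sketch of that template is shown here; the command struct, command id parameter and function name are hypothetical and stand in for the concrete L2NIC commands used in hinic3_nic_cfg.c.

#include "hinic3_hwdev.h"
#include "hinic3_hwif.h"
#include "hinic3_mbox.h"

/* Hypothetical command layout, shown only to illustrate the calling
 * convention: every request starts with struct mgmt_msg_head.
 */
struct example_l2nic_cmd {
	struct mgmt_msg_head msg_head;
	u16 func_id;
	u16 rsvd;
};

static int example_send_l2nic_cmd(struct hinic3_hwdev *hwdev, u16 cmd_id)
{
	struct example_l2nic_cmd cmd = {};
	struct mgmt_msg_params msg_params = {};
	int err;

	cmd.func_id = hinic3_global_func_id(hwdev);

	/* buf_in and buf_out point at the same buffer, so the firmware
	 * reply overwrites the request in place.
	 */
	mgmt_msg_params_init_default(&msg_params, &cmd, sizeof(cmd));

	err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC, cmd_id,
				       &msg_params);
	if (err || cmd.msg_head.status) {
		dev_err(hwdev->dev, "cmd %u failed, err: %d, status: 0x%x\n",
			cmd_id, err, cmd.msg_head.status);
		return -EIO;
	}

	return 0;
}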
+ +#include "hinic3_hw_comm.h" +#include "hinic3_hw_intf.h" +#include "hinic3_hwdev.h" +#include "hinic3_hwif.h" +#include "hinic3_nic_cfg.h" +#include "hinic3_nic_dev.h" +#include "hinic3_nic_io.h" + +int hinic3_init_nic_io(struct hinic3_nic_dev *nic_dev) +{ + /* Completed by later submission due to LoC limit. */ + return -EFAULT; +} + +void hinic3_free_nic_io(struct hinic3_nic_dev *nic_dev) +{ + /* Completed by later submission due to LoC limit. */ +} diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h new file mode 100644 index 000000000000..865ba6878c48 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h @@ -0,0 +1,120 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */ + +#ifndef _HINIC3_NIC_IO_H_ +#define _HINIC3_NIC_IO_H_ + +#include <linux/bitfield.h> + +#include "hinic3_wq.h" + +struct hinic3_nic_dev; + +#define HINIC3_SQ_WQEBB_SHIFT 4 +#define HINIC3_RQ_WQEBB_SHIFT 3 +#define HINIC3_SQ_WQEBB_SIZE BIT(HINIC3_SQ_WQEBB_SHIFT) + +/* ******************** RQ_CTRL ******************** */ +enum hinic3_rq_wqe_type { + HINIC3_NORMAL_RQ_WQE = 1, +}; + +/* ******************** SQ_CTRL ******************** */ +#define HINIC3_TX_MSS_DEFAULT 0x3E00 +#define HINIC3_TX_MSS_MIN 0x50 +#define HINIC3_MAX_SQ_SGE 18 + +struct hinic3_io_queue { + struct hinic3_wq wq; + u8 owner; + u16 q_id; + u16 msix_entry_idx; + u8 __iomem *db_addr; + u16 *cons_idx_addr; +} ____cacheline_aligned; + +static inline u16 hinic3_get_sq_local_ci(const struct hinic3_io_queue *sq) +{ + const struct hinic3_wq *wq = &sq->wq; + + return wq->cons_idx & wq->idx_mask; +} + +static inline u16 hinic3_get_sq_local_pi(const struct hinic3_io_queue *sq) +{ + const struct hinic3_wq *wq = &sq->wq; + + return wq->prod_idx & wq->idx_mask; +} + +static inline u16 hinic3_get_sq_hw_ci(const struct hinic3_io_queue *sq) +{ + const struct hinic3_wq *wq = &sq->wq; + + return READ_ONCE(*sq->cons_idx_addr) & wq->idx_mask; +} + +/* ******************** DB INFO ******************** */ +#define DB_INFO_QID_MASK GENMASK(12, 0) +#define DB_INFO_CFLAG_MASK BIT(23) +#define DB_INFO_COS_MASK GENMASK(26, 24) +#define DB_INFO_TYPE_MASK GENMASK(31, 27) +#define DB_INFO_SET(val, member) \ + FIELD_PREP(DB_INFO_##member##_MASK, val) + +#define DB_PI_LOW_MASK 0xFFU +#define DB_PI_HIGH_MASK 0xFFU +#define DB_PI_HI_SHIFT 8 +#define DB_PI_LOW(pi) ((pi) & DB_PI_LOW_MASK) +#define DB_PI_HIGH(pi) (((pi) >> DB_PI_HI_SHIFT) & DB_PI_HIGH_MASK) +#define DB_ADDR(q, pi) ((u64 __iomem *)((q)->db_addr) + DB_PI_LOW(pi)) +#define DB_SRC_TYPE 1 + +/* CFLAG_DATA_PATH */ +#define DB_CFLAG_DP_SQ 0 +#define DB_CFLAG_DP_RQ 1 + +struct hinic3_nic_db { + u32 db_info; + u32 pi_hi; +}; + +static inline void hinic3_write_db(struct hinic3_io_queue *queue, int cos, + u8 cflag, u16 pi) +{ + struct hinic3_nic_db db; + + db.db_info = DB_INFO_SET(DB_SRC_TYPE, TYPE) | + DB_INFO_SET(cflag, CFLAG) | + DB_INFO_SET(cos, COS) | + DB_INFO_SET(queue->q_id, QID); + db.pi_hi = DB_PI_HIGH(pi); + + writeq(*((u64 *)&db), DB_ADDR(queue, pi)); +} + +struct hinic3_nic_io { + struct hinic3_io_queue *sq; + struct hinic3_io_queue *rq; + + u16 num_qps; + u16 max_qps; + + /* Base address for consumer index of all tx queues. Each queue is + * given a full cache line to hold its consumer index. HW updates + * current consumer index as it consumes tx WQEs. 
+ */ + void *ci_vaddr_base; + dma_addr_t ci_dma_base; + + u8 __iomem *sqs_db_addr; + u8 __iomem *rqs_db_addr; + + u16 rx_buf_len; + u64 feature_cap; +}; + +int hinic3_init_nic_io(struct hinic3_nic_dev *nic_dev); +void hinic3_free_nic_io(struct hinic3_nic_dev *nic_dev); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.c b/drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.c new file mode 100644 index 000000000000..fab9011de9ad --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.c @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. + +#include <linux/device.h> + +#include "hinic3_hwdev.h" +#include "hinic3_queue_common.h" + +void hinic3_queue_pages_init(struct hinic3_queue_pages *qpages, u32 q_depth, + u32 page_size, u32 elem_size) +{ + u32 elem_per_page; + + elem_per_page = min(page_size / elem_size, q_depth); + + qpages->pages = NULL; + qpages->page_size = page_size; + qpages->num_pages = max(q_depth / elem_per_page, 1); + qpages->elem_size_shift = ilog2(elem_size); + qpages->elem_per_pg_shift = ilog2(elem_per_page); +} + +static void __queue_pages_free(struct hinic3_hwdev *hwdev, + struct hinic3_queue_pages *qpages, u32 pg_cnt) +{ + while (pg_cnt > 0) { + pg_cnt--; + hinic3_dma_free_coherent_align(hwdev->dev, + qpages->pages + pg_cnt); + } + kfree(qpages->pages); + qpages->pages = NULL; +} + +void hinic3_queue_pages_free(struct hinic3_hwdev *hwdev, + struct hinic3_queue_pages *qpages) +{ + __queue_pages_free(hwdev, qpages, qpages->num_pages); +} + +int hinic3_queue_pages_alloc(struct hinic3_hwdev *hwdev, + struct hinic3_queue_pages *qpages, u32 align) +{ + u32 pg_idx; + int err; + + qpages->pages = kcalloc(qpages->num_pages, sizeof(qpages->pages[0]), + GFP_KERNEL); + if (!qpages->pages) + return -ENOMEM; + + if (align == 0) + align = qpages->page_size; + + for (pg_idx = 0; pg_idx < qpages->num_pages; pg_idx++) { + err = hinic3_dma_zalloc_coherent_align(hwdev->dev, + qpages->page_size, + align, + GFP_KERNEL, + qpages->pages + pg_idx); + if (err) { + __queue_pages_free(hwdev, qpages, pg_idx); + return err; + } + } + + return 0; +} diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.h b/drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.h new file mode 100644 index 000000000000..ec4cae0a0929 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */ + +#ifndef _HINIC3_QUEUE_COMMON_H_ +#define _HINIC3_QUEUE_COMMON_H_ + +#include <linux/types.h> + +#include "hinic3_common.h" + +struct hinic3_hwdev; + +struct hinic3_queue_pages { + /* Array of DMA-able pages that actually holds the queue entries. */ + struct hinic3_dma_addr_align *pages; + /* Page size in bytes. */ + u32 page_size; + /* Number of pages, must be power of 2. */ + u16 num_pages; + u8 elem_size_shift; + u8 elem_per_pg_shift; +}; + +void hinic3_queue_pages_init(struct hinic3_queue_pages *qpages, u32 q_depth, + u32 page_size, u32 elem_size); +int hinic3_queue_pages_alloc(struct hinic3_hwdev *hwdev, + struct hinic3_queue_pages *qpages, u32 align); +void hinic3_queue_pages_free(struct hinic3_hwdev *hwdev, + struct hinic3_queue_pages *qpages); + +/* Get pointer to queue entry at the specified index. Index does not have to be + * masked to queue depth, only least significant bits will be used. 
Also + * provides remaining elements in same page (including the first one) in case + * caller needs multiple entries. + */ +static inline void *get_q_element(const struct hinic3_queue_pages *qpages, + u32 idx, u32 *remaining_in_page) +{ + const struct hinic3_dma_addr_align *page; + u32 page_idx, elem_idx, elem_per_pg, ofs; + u8 shift; + + shift = qpages->elem_per_pg_shift; + page_idx = (idx >> shift) & (qpages->num_pages - 1); + elem_per_pg = 1 << shift; + elem_idx = idx & (elem_per_pg - 1); + if (remaining_in_page) + *remaining_in_page = elem_per_pg - elem_idx; + ofs = elem_idx << qpages->elem_size_shift; + page = qpages->pages + page_idx; + return (char *)page->align_vaddr + ofs; +} + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c new file mode 100644 index 000000000000..860163e9d66c --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c @@ -0,0 +1,341 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. + +#include <linux/etherdevice.h> +#include <linux/if_vlan.h> +#include <linux/netdevice.h> +#include <net/gro.h> +#include <net/page_pool/helpers.h> + +#include "hinic3_hwdev.h" +#include "hinic3_nic_dev.h" +#include "hinic3_nic_io.h" +#include "hinic3_rx.h" + +#define HINIC3_RX_HDR_SIZE 256 +#define HINIC3_RX_BUFFER_WRITE 16 + +#define HINIC3_RX_TCP_PKT 0x3 +#define HINIC3_RX_UDP_PKT 0x4 +#define HINIC3_RX_SCTP_PKT 0x7 + +#define HINIC3_RX_IPV4_PKT 0 +#define HINIC3_RX_IPV6_PKT 1 +#define HINIC3_RX_INVALID_IP_TYPE 2 + +#define HINIC3_RX_PKT_FORMAT_NON_TUNNEL 0 +#define HINIC3_RX_PKT_FORMAT_VXLAN 1 + +#define HINIC3_LRO_PKT_HDR_LEN_IPV4 66 +#define HINIC3_LRO_PKT_HDR_LEN_IPV6 86 +#define HINIC3_LRO_PKT_HDR_LEN(cqe) \ + (RQ_CQE_OFFOLAD_TYPE_GET((cqe)->offload_type, IP_TYPE) == \ + HINIC3_RX_IPV6_PKT ? HINIC3_LRO_PKT_HDR_LEN_IPV6 : \ + HINIC3_LRO_PKT_HDR_LEN_IPV4) + +int hinic3_alloc_rxqs(struct net_device *netdev) +{ + /* Completed by later submission due to LoC limit. */ + return -EFAULT; +} + +void hinic3_free_rxqs(struct net_device *netdev) +{ + /* Completed by later submission due to LoC limit. 
*/ +} + +static int rx_alloc_mapped_page(struct page_pool *page_pool, + struct hinic3_rx_info *rx_info, u16 buf_len) +{ + struct page *page; + u32 page_offset; + + page = page_pool_dev_alloc_frag(page_pool, &page_offset, buf_len); + if (unlikely(!page)) + return -ENOMEM; + + rx_info->page = page; + rx_info->page_offset = page_offset; + + return 0; +} + +static void rq_wqe_buf_set(struct hinic3_io_queue *rq, uint32_t wqe_idx, + dma_addr_t dma_addr, u16 len) +{ + struct hinic3_rq_wqe *rq_wqe; + + rq_wqe = get_q_element(&rq->wq.qpages, wqe_idx, NULL); + rq_wqe->buf_hi_addr = upper_32_bits(dma_addr); + rq_wqe->buf_lo_addr = lower_32_bits(dma_addr); +} + +static u32 hinic3_rx_fill_buffers(struct hinic3_rxq *rxq) +{ + u32 i, free_wqebbs = rxq->delta - 1; + struct hinic3_rx_info *rx_info; + dma_addr_t dma_addr; + int err; + + for (i = 0; i < free_wqebbs; i++) { + rx_info = &rxq->rx_info[rxq->next_to_update]; + + err = rx_alloc_mapped_page(rxq->page_pool, rx_info, + rxq->buf_len); + if (unlikely(err)) + break; + + dma_addr = page_pool_get_dma_addr(rx_info->page) + + rx_info->page_offset; + rq_wqe_buf_set(rxq->rq, rxq->next_to_update, dma_addr, + rxq->buf_len); + rxq->next_to_update = (rxq->next_to_update + 1) & rxq->q_mask; + } + + if (likely(i)) { + hinic3_write_db(rxq->rq, rxq->q_id & 3, DB_CFLAG_DP_RQ, + rxq->next_to_update << HINIC3_NORMAL_RQ_WQE); + rxq->delta -= i; + rxq->next_to_alloc = rxq->next_to_update; + } + + return i; +} + +static void hinic3_add_rx_frag(struct hinic3_rxq *rxq, + struct hinic3_rx_info *rx_info, + struct sk_buff *skb, u32 size) +{ + struct page *page; + u8 *va; + + page = rx_info->page; + va = (u8 *)page_address(page) + rx_info->page_offset; + net_prefetch(va); + + page_pool_dma_sync_for_cpu(rxq->page_pool, page, rx_info->page_offset, + rxq->buf_len); + + if (size <= HINIC3_RX_HDR_SIZE && !skb_is_nonlinear(skb)) { + memcpy(__skb_put(skb, size), va, + ALIGN(size, sizeof(long))); + page_pool_put_full_page(rxq->page_pool, page, false); + + return; + } + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + rx_info->page_offset, size, rxq->buf_len); + skb_mark_for_recycle(skb); +} + +static void packaging_skb(struct hinic3_rxq *rxq, struct sk_buff *skb, + u32 sge_num, u32 pkt_len) +{ + struct hinic3_rx_info *rx_info; + u32 temp_pkt_len = pkt_len; + u32 temp_sge_num = sge_num; + u32 sw_ci; + u32 size; + + sw_ci = rxq->cons_idx & rxq->q_mask; + while (temp_sge_num) { + rx_info = &rxq->rx_info[sw_ci]; + sw_ci = (sw_ci + 1) & rxq->q_mask; + if (unlikely(temp_pkt_len > rxq->buf_len)) { + size = rxq->buf_len; + temp_pkt_len -= rxq->buf_len; + } else { + size = temp_pkt_len; + } + + hinic3_add_rx_frag(rxq, rx_info, skb, size); + + /* clear contents of buffer_info */ + rx_info->page = NULL; + temp_sge_num--; + } +} + +static u32 hinic3_get_sge_num(struct hinic3_rxq *rxq, u32 pkt_len) +{ + u32 sge_num; + + sge_num = pkt_len >> rxq->buf_len_shift; + sge_num += (pkt_len & (rxq->buf_len - 1)) ? 
1 : 0; + + return sge_num; +} + +static struct sk_buff *hinic3_fetch_rx_buffer(struct hinic3_rxq *rxq, + u32 pkt_len) +{ + struct sk_buff *skb; + u32 sge_num; + + skb = napi_alloc_skb(&rxq->irq_cfg->napi, HINIC3_RX_HDR_SIZE); + if (unlikely(!skb)) + return NULL; + + sge_num = hinic3_get_sge_num(rxq, pkt_len); + + net_prefetchw(skb->data); + packaging_skb(rxq, skb, sge_num, pkt_len); + + rxq->cons_idx += sge_num; + rxq->delta += sge_num; + + return skb; +} + +static void hinic3_pull_tail(struct sk_buff *skb) +{ + skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; + unsigned int pull_len; + unsigned char *va; + + va = skb_frag_address(frag); + + /* we need the header to contain the greater of either ETH_HLEN or + * 60 bytes if the skb->len is less than 60 for skb_pad. + */ + pull_len = eth_get_headlen(skb->dev, va, HINIC3_RX_HDR_SIZE); + + /* align pull length to size of long to optimize memcpy performance */ + skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); + + /* update all of the pointers */ + skb_frag_size_sub(frag, pull_len); + skb_frag_off_add(frag, pull_len); + + skb->data_len -= pull_len; + skb->tail += pull_len; +} + +static void hinic3_rx_csum(struct hinic3_rxq *rxq, u32 offload_type, + u32 status, struct sk_buff *skb) +{ + u32 pkt_fmt = RQ_CQE_OFFOLAD_TYPE_GET(offload_type, TUNNEL_PKT_FORMAT); + u32 pkt_type = RQ_CQE_OFFOLAD_TYPE_GET(offload_type, PKT_TYPE); + u32 ip_type = RQ_CQE_OFFOLAD_TYPE_GET(offload_type, IP_TYPE); + u32 csum_err = RQ_CQE_STATUS_GET(status, CSUM_ERR); + struct net_device *netdev = rxq->netdev; + + if (!(netdev->features & NETIF_F_RXCSUM)) + return; + + if (unlikely(csum_err)) { + /* pkt type is recognized by HW, and csum is wrong */ + skb->ip_summed = CHECKSUM_NONE; + return; + } + + if (ip_type == HINIC3_RX_INVALID_IP_TYPE || + !(pkt_fmt == HINIC3_RX_PKT_FORMAT_NON_TUNNEL || + pkt_fmt == HINIC3_RX_PKT_FORMAT_VXLAN)) { + skb->ip_summed = CHECKSUM_NONE; + return; + } + + switch (pkt_type) { + case HINIC3_RX_TCP_PKT: + case HINIC3_RX_UDP_PKT: + case HINIC3_RX_SCTP_PKT: + skb->ip_summed = CHECKSUM_UNNECESSARY; + break; + default: + skb->ip_summed = CHECKSUM_NONE; + break; + } +} + +static void hinic3_lro_set_gso_params(struct sk_buff *skb, u16 num_lro) +{ + struct ethhdr *eth = (struct ethhdr *)(skb->data); + __be16 proto; + + proto = __vlan_get_protocol(skb, eth->h_proto, NULL); + + skb_shinfo(skb)->gso_size = DIV_ROUND_UP(skb->len - skb_headlen(skb), + num_lro); + skb_shinfo(skb)->gso_type = proto == htons(ETH_P_IP) ? 
+ SKB_GSO_TCPV4 : SKB_GSO_TCPV6; + skb_shinfo(skb)->gso_segs = num_lro; +} + +static int recv_one_pkt(struct hinic3_rxq *rxq, struct hinic3_rq_cqe *rx_cqe, + u32 pkt_len, u32 vlan_len, u32 status) +{ + struct net_device *netdev = rxq->netdev; + struct sk_buff *skb; + u32 offload_type; + u16 num_lro; + + skb = hinic3_fetch_rx_buffer(rxq, pkt_len); + if (unlikely(!skb)) + return -ENOMEM; + + /* place header in linear portion of buffer */ + if (skb_is_nonlinear(skb)) + hinic3_pull_tail(skb); + + offload_type = rx_cqe->offload_type; + hinic3_rx_csum(rxq, offload_type, status, skb); + + num_lro = RQ_CQE_STATUS_GET(status, NUM_LRO); + if (num_lro) + hinic3_lro_set_gso_params(skb, num_lro); + + skb_record_rx_queue(skb, rxq->q_id); + skb->protocol = eth_type_trans(skb, netdev); + + if (skb_has_frag_list(skb)) { + napi_gro_flush(&rxq->irq_cfg->napi, false); + netif_receive_skb(skb); + } else { + napi_gro_receive(&rxq->irq_cfg->napi, skb); + } + + return 0; +} + +int hinic3_rx_poll(struct hinic3_rxq *rxq, int budget) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(rxq->netdev); + u32 sw_ci, status, pkt_len, vlan_len; + struct hinic3_rq_cqe *rx_cqe; + u32 num_wqe = 0; + int nr_pkts = 0; + u16 num_lro; + + while (likely(nr_pkts < budget)) { + sw_ci = rxq->cons_idx & rxq->q_mask; + rx_cqe = rxq->cqe_arr + sw_ci; + status = rx_cqe->status; + if (!RQ_CQE_STATUS_GET(status, RXDONE)) + break; + + /* make sure we read rx_done before packet length */ + rmb(); + + vlan_len = rx_cqe->vlan_len; + pkt_len = RQ_CQE_SGE_GET(vlan_len, LEN); + if (recv_one_pkt(rxq, rx_cqe, pkt_len, vlan_len, status)) + break; + + nr_pkts++; + num_lro = RQ_CQE_STATUS_GET(status, NUM_LRO); + if (num_lro) + num_wqe += hinic3_get_sge_num(rxq, pkt_len); + + rx_cqe->status = 0; + + if (num_wqe >= nic_dev->lro_replenish_thld) + break; + } + + if (rxq->delta >= HINIC3_RX_BUFFER_WRITE) + hinic3_rx_fill_buffers(rxq); + + return nr_pkts; +} diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h new file mode 100644 index 000000000000..1cca21858d40 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h @@ -0,0 +1,90 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. 
*/ + +#ifndef _HINIC3_RX_H_ +#define _HINIC3_RX_H_ + +#include <linux/bitfield.h> +#include <linux/netdevice.h> + +#define RQ_CQE_OFFOLAD_TYPE_PKT_TYPE_MASK GENMASK(4, 0) +#define RQ_CQE_OFFOLAD_TYPE_IP_TYPE_MASK GENMASK(6, 5) +#define RQ_CQE_OFFOLAD_TYPE_TUNNEL_PKT_FORMAT_MASK GENMASK(11, 8) +#define RQ_CQE_OFFOLAD_TYPE_VLAN_EN_MASK BIT(21) +#define RQ_CQE_OFFOLAD_TYPE_GET(val, member) \ + FIELD_GET(RQ_CQE_OFFOLAD_TYPE_##member##_MASK, val) + +#define RQ_CQE_SGE_VLAN_MASK GENMASK(15, 0) +#define RQ_CQE_SGE_LEN_MASK GENMASK(31, 16) +#define RQ_CQE_SGE_GET(val, member) \ + FIELD_GET(RQ_CQE_SGE_##member##_MASK, val) + +#define RQ_CQE_STATUS_CSUM_ERR_MASK GENMASK(15, 0) +#define RQ_CQE_STATUS_NUM_LRO_MASK GENMASK(23, 16) +#define RQ_CQE_STATUS_RXDONE_MASK BIT(31) +#define RQ_CQE_STATUS_GET(val, member) \ + FIELD_GET(RQ_CQE_STATUS_##member##_MASK, val) + +/* RX Completion information that is provided by HW for a specific RX WQE */ +struct hinic3_rq_cqe { + u32 status; + u32 vlan_len; + u32 offload_type; + u32 rsvd3; + u32 rsvd4; + u32 rsvd5; + u32 rsvd6; + u32 pkt_info; +}; + +struct hinic3_rq_wqe { + u32 buf_hi_addr; + u32 buf_lo_addr; + u32 cqe_hi_addr; + u32 cqe_lo_addr; +}; + +struct hinic3_rx_info { + struct page *page; + u32 page_offset; +}; + +struct hinic3_rxq { + struct net_device *netdev; + + u16 q_id; + u32 q_depth; + u32 q_mask; + + u16 buf_len; + u32 buf_len_shift; + + u32 cons_idx; + u32 delta; + + u32 irq_id; + u16 msix_entry_idx; + + /* cqe_arr and rx_info are arrays of rq_depth elements. Each element is + * statically associated (by index) to a specific rq_wqe. + */ + struct hinic3_rq_cqe *cqe_arr; + struct hinic3_rx_info *rx_info; + struct page_pool *page_pool; + + struct hinic3_io_queue *rq; + + struct hinic3_irq_cfg *irq_cfg; + u16 next_to_alloc; + u16 next_to_update; + struct device *dev; /* device for DMA mapping */ + + dma_addr_t cqe_start_paddr; +} ____cacheline_aligned; + +int hinic3_alloc_rxqs(struct net_device *netdev); +void hinic3_free_rxqs(struct net_device *netdev); + +int hinic3_rx_poll(struct hinic3_rxq *rxq, int budget); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c new file mode 100644 index 000000000000..ae08257dd1d2 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c @@ -0,0 +1,670 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. 
+ +#include <linux/if_vlan.h> +#include <linux/iopoll.h> +#include <net/ip6_checksum.h> +#include <net/ipv6.h> +#include <net/netdev_queues.h> + +#include "hinic3_hwdev.h" +#include "hinic3_nic_cfg.h" +#include "hinic3_nic_dev.h" +#include "hinic3_nic_io.h" +#include "hinic3_tx.h" +#include "hinic3_wq.h" + +#define MIN_SKB_LEN 32 + +int hinic3_alloc_txqs(struct net_device *netdev) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic3_hwdev *hwdev = nic_dev->hwdev; + u16 q_id, num_txqs = nic_dev->max_qps; + struct pci_dev *pdev = nic_dev->pdev; + struct hinic3_txq *txq; + + if (!num_txqs) { + dev_err(hwdev->dev, "Cannot allocate zero size txqs\n"); + return -EINVAL; + } + + nic_dev->txqs = kcalloc(num_txqs, sizeof(*nic_dev->txqs), GFP_KERNEL); + if (!nic_dev->txqs) + return -ENOMEM; + + for (q_id = 0; q_id < num_txqs; q_id++) { + txq = &nic_dev->txqs[q_id]; + txq->netdev = netdev; + txq->q_id = q_id; + txq->q_depth = nic_dev->q_params.sq_depth; + txq->q_mask = nic_dev->q_params.sq_depth - 1; + txq->dev = &pdev->dev; + } + + return 0; +} + +void hinic3_free_txqs(struct net_device *netdev) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + + kfree(nic_dev->txqs); +} + +static void hinic3_set_buf_desc(struct hinic3_sq_bufdesc *buf_descs, + dma_addr_t addr, u32 len) +{ + buf_descs->hi_addr = upper_32_bits(addr); + buf_descs->lo_addr = lower_32_bits(addr); + buf_descs->len = len; +} + +static int hinic3_tx_map_skb(struct net_device *netdev, struct sk_buff *skb, + struct hinic3_txq *txq, + struct hinic3_tx_info *tx_info, + struct hinic3_sq_wqe_combo *wqe_combo) +{ + struct hinic3_sq_wqe_desc *wqe_desc = wqe_combo->ctrl_bd0; + struct hinic3_sq_bufdesc *buf_desc = wqe_combo->bds_head; + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic3_dma_info *dma_info = tx_info->dma_info; + struct pci_dev *pdev = nic_dev->pdev; + skb_frag_t *frag; + u32 i, idx; + int err; + + dma_info[0].dma = dma_map_single(&pdev->dev, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, dma_info[0].dma)) + return -EFAULT; + + dma_info[0].len = skb_headlen(skb); + + wqe_desc->hi_addr = upper_32_bits(dma_info[0].dma); + wqe_desc->lo_addr = lower_32_bits(dma_info[0].dma); + + wqe_desc->ctrl_len = dma_info[0].len; + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + frag = &(skb_shinfo(skb)->frags[i]); + if (unlikely(i == wqe_combo->first_bds_num)) + buf_desc = wqe_combo->bds_sec2; + + idx = i + 1; + dma_info[idx].dma = skb_frag_dma_map(&pdev->dev, frag, 0, + skb_frag_size(frag), + DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, dma_info[idx].dma)) { + err = -EFAULT; + goto err_unmap_page; + } + dma_info[idx].len = skb_frag_size(frag); + + hinic3_set_buf_desc(buf_desc, dma_info[idx].dma, + dma_info[idx].len); + buf_desc++; + } + + return 0; + +err_unmap_page: + while (idx > 1) { + idx--; + dma_unmap_page(&pdev->dev, dma_info[idx].dma, + dma_info[idx].len, DMA_TO_DEVICE); + } + dma_unmap_single(&pdev->dev, dma_info[0].dma, dma_info[0].len, + DMA_TO_DEVICE); + return err; +} + +static void hinic3_tx_unmap_skb(struct net_device *netdev, + struct sk_buff *skb, + struct hinic3_dma_info *dma_info) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + struct pci_dev *pdev = nic_dev->pdev; + int i; + + for (i = 0; i < skb_shinfo(skb)->nr_frags;) { + i++; + dma_unmap_page(&pdev->dev, + dma_info[i].dma, + dma_info[i].len, DMA_TO_DEVICE); + } + + dma_unmap_single(&pdev->dev, dma_info[0].dma, + dma_info[0].len, DMA_TO_DEVICE); +} + +union hinic3_ip 
{ + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; +}; + +union hinic3_l4 { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; +}; + +enum hinic3_l3_type { + HINIC3_L3_UNKNOWN = 0, + HINIC3_L3_IP6_PKT = 1, + HINIC3_L3_IP4_PKT_NO_CSUM = 2, + HINIC3_L3_IP4_PKT_CSUM = 3, +}; + +enum hinic3_l4_offload_type { + HINIC3_L4_OFFLOAD_DISABLE = 0, + HINIC3_L4_OFFLOAD_TCP = 1, + HINIC3_L4_OFFLOAD_STCP = 2, + HINIC3_L4_OFFLOAD_UDP = 3, +}; + +/* initialize l4 offset and offload */ +static void get_inner_l4_info(struct sk_buff *skb, union hinic3_l4 *l4, + u8 l4_proto, u32 *offset, + enum hinic3_l4_offload_type *l4_offload) +{ + switch (l4_proto) { + case IPPROTO_TCP: + *l4_offload = HINIC3_L4_OFFLOAD_TCP; + /* To be same with TSO, payload offset begins from payload */ + *offset = (l4->tcp->doff << TCP_HDR_DATA_OFF_UNIT_SHIFT) + + TRANSPORT_OFFSET(l4->hdr, skb); + break; + + case IPPROTO_UDP: + *l4_offload = HINIC3_L4_OFFLOAD_UDP; + *offset = TRANSPORT_OFFSET(l4->hdr, skb); + break; + default: + *l4_offload = HINIC3_L4_OFFLOAD_DISABLE; + *offset = 0; + } +} + +static int hinic3_tx_csum(struct hinic3_txq *txq, struct hinic3_sq_task *task, + struct sk_buff *skb) +{ + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + if (skb->encapsulation) { + union hinic3_ip ip; + u8 l4_proto; + + task->pkt_info0 |= SQ_TASK_INFO0_SET(1, TUNNEL_FLAG); + + ip.hdr = skb_network_header(skb); + if (ip.v4->version == 4) { + l4_proto = ip.v4->protocol; + } else if (ip.v4->version == 6) { + union hinic3_l4 l4; + unsigned char *exthdr; + __be16 frag_off; + + exthdr = ip.hdr + sizeof(*ip.v6); + l4_proto = ip.v6->nexthdr; + l4.hdr = skb_transport_header(skb); + if (l4.hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_proto, &frag_off); + } else { + l4_proto = IPPROTO_RAW; + } + + if (l4_proto != IPPROTO_UDP || + ((struct udphdr *)skb_transport_header(skb))->dest != + VXLAN_OFFLOAD_PORT_LE) { + /* Unsupported tunnel packet, disable csum offload */ + skb_checksum_help(skb); + return 0; + } + } + + task->pkt_info0 |= SQ_TASK_INFO0_SET(1, INNER_L4_EN); + + return 1; +} + +static void get_inner_l3_l4_type(struct sk_buff *skb, union hinic3_ip *ip, + union hinic3_l4 *l4, + enum hinic3_l3_type *l3_type, u8 *l4_proto) +{ + unsigned char *exthdr; + __be16 frag_off; + + if (ip->v4->version == 4) { + *l3_type = HINIC3_L3_IP4_PKT_CSUM; + *l4_proto = ip->v4->protocol; + } else if (ip->v4->version == 6) { + *l3_type = HINIC3_L3_IP6_PKT; + exthdr = ip->hdr + sizeof(*ip->v6); + *l4_proto = ip->v6->nexthdr; + if (exthdr != l4->hdr) { + ipv6_skip_exthdr(skb, exthdr - skb->data, + l4_proto, &frag_off); + } + } else { + *l3_type = HINIC3_L3_UNKNOWN; + *l4_proto = 0; + } +} + +static void hinic3_set_tso_info(struct hinic3_sq_task *task, u32 *queue_info, + enum hinic3_l4_offload_type l4_offload, + u32 offset, u32 mss) +{ + if (l4_offload == HINIC3_L4_OFFLOAD_TCP) { + *queue_info |= SQ_CTRL_QUEUE_INFO_SET(1, TSO); + task->pkt_info0 |= SQ_TASK_INFO0_SET(1, INNER_L4_EN); + } else if (l4_offload == HINIC3_L4_OFFLOAD_UDP) { + *queue_info |= SQ_CTRL_QUEUE_INFO_SET(1, UFO); + task->pkt_info0 |= SQ_TASK_INFO0_SET(1, INNER_L4_EN); + } + + /* enable L3 calculation */ + task->pkt_info0 |= SQ_TASK_INFO0_SET(1, INNER_L3_EN); + + *queue_info |= SQ_CTRL_QUEUE_INFO_SET(offset >> 1, PLDOFF); + + /* set MSS value */ + *queue_info &= ~SQ_CTRL_QUEUE_INFO_MSS_MASK; + *queue_info |= SQ_CTRL_QUEUE_INFO_SET(mss, MSS); +} + +static __sum16 csum_magic(union hinic3_ip *ip, unsigned short proto) +{ + return (ip->v4->version == 4) ? 
+ csum_tcpudp_magic(ip->v4->saddr, ip->v4->daddr, 0, proto, 0) : + csum_ipv6_magic(&ip->v6->saddr, &ip->v6->daddr, 0, proto, 0); +} + +static int hinic3_tso(struct hinic3_sq_task *task, u32 *queue_info, + struct sk_buff *skb) +{ + enum hinic3_l4_offload_type l4_offload; + enum hinic3_l3_type l3_type; + union hinic3_ip ip; + union hinic3_l4 l4; + u8 l4_proto; + u32 offset; + int err; + + if (!skb_is_gso(skb)) + return 0; + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + if (skb->encapsulation) { + u32 gso_type = skb_shinfo(skb)->gso_type; + /* L3 checksum is always enabled */ + task->pkt_info0 |= SQ_TASK_INFO0_SET(1, OUT_L3_EN); + task->pkt_info0 |= SQ_TASK_INFO0_SET(1, TUNNEL_FLAG); + + l4.hdr = skb_transport_header(skb); + ip.hdr = skb_network_header(skb); + + if (gso_type & SKB_GSO_UDP_TUNNEL_CSUM) { + l4.udp->check = ~csum_magic(&ip, IPPROTO_UDP); + task->pkt_info0 |= SQ_TASK_INFO0_SET(1, OUT_L4_EN); + } + + ip.hdr = skb_inner_network_header(skb); + l4.hdr = skb_inner_transport_header(skb); + } else { + ip.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); + } + + get_inner_l3_l4_type(skb, &ip, &l4, &l3_type, &l4_proto); + + if (l4_proto == IPPROTO_TCP) + l4.tcp->check = ~csum_magic(&ip, IPPROTO_TCP); + + get_inner_l4_info(skb, &l4, l4_proto, &offset, &l4_offload); + + hinic3_set_tso_info(task, queue_info, l4_offload, offset, + skb_shinfo(skb)->gso_size); + + return 1; +} + +static void hinic3_set_vlan_tx_offload(struct hinic3_sq_task *task, + u16 vlan_tag, u8 vlan_tpid) +{ + /* vlan_tpid: 0=select TPID0 in IPSU, 1=select TPID1 in IPSU + * 2=select TPID2 in IPSU, 3=select TPID3 in IPSU, + * 4=select TPID4 in IPSU + */ + task->vlan_offload = SQ_TASK_INFO3_SET(vlan_tag, VLAN_TAG) | + SQ_TASK_INFO3_SET(vlan_tpid, VLAN_TPID) | + SQ_TASK_INFO3_SET(1, VLAN_TAG_VALID); +} + +static u32 hinic3_tx_offload(struct sk_buff *skb, struct hinic3_sq_task *task, + u32 *queue_info, struct hinic3_txq *txq) +{ + u32 offload = 0; + int tso_cs_en; + + task->pkt_info0 = 0; + task->ip_identify = 0; + task->rsvd = 0; + task->vlan_offload = 0; + + tso_cs_en = hinic3_tso(task, queue_info, skb); + if (tso_cs_en < 0) { + offload = HINIC3_TX_OFFLOAD_INVALID; + return offload; + } else if (tso_cs_en) { + offload |= HINIC3_TX_OFFLOAD_TSO; + } else { + tso_cs_en = hinic3_tx_csum(txq, task, skb); + if (tso_cs_en) + offload |= HINIC3_TX_OFFLOAD_CSUM; + } + +#define VLAN_INSERT_MODE_MAX 5 + if (unlikely(skb_vlan_tag_present(skb))) { + /* select vlan insert mode by qid, default 802.1Q Tag type */ + hinic3_set_vlan_tx_offload(task, skb_vlan_tag_get(skb), + txq->q_id % VLAN_INSERT_MODE_MAX); + offload |= HINIC3_TX_OFFLOAD_VLAN; + } + + if (unlikely(SQ_CTRL_QUEUE_INFO_GET(*queue_info, PLDOFF) > + SQ_CTRL_MAX_PLDOFF)) { + offload = HINIC3_TX_OFFLOAD_INVALID; + return offload; + } + + return offload; +} + +static u16 hinic3_get_and_update_sq_owner(struct hinic3_io_queue *sq, + u16 curr_pi, u16 wqebb_cnt) +{ + u16 owner = sq->owner; + + if (unlikely(curr_pi + wqebb_cnt >= sq->wq.q_depth)) + sq->owner = !sq->owner; + + return owner; +} + +static u16 hinic3_set_wqe_combo(struct hinic3_txq *txq, + struct hinic3_sq_wqe_combo *wqe_combo, + u32 offload, u16 num_sge, u16 *curr_pi) +{ + struct hinic3_sq_bufdesc *first_part_wqebbs, *second_part_wqebbs; + u16 first_part_wqebbs_num, tmp_pi; + + wqe_combo->ctrl_bd0 = hinic3_wq_get_one_wqebb(&txq->sq->wq, curr_pi); + if (!offload && num_sge == 1) { + wqe_combo->wqe_type = SQ_WQE_COMPACT_TYPE; + return hinic3_get_and_update_sq_owner(txq->sq, *curr_pi, 1); + } 
+ + wqe_combo->wqe_type = SQ_WQE_EXTENDED_TYPE; + + if (offload) { + wqe_combo->task = hinic3_wq_get_one_wqebb(&txq->sq->wq, + &tmp_pi); + wqe_combo->task_type = SQ_WQE_TASKSECT_16BYTES; + } else { + wqe_combo->task_type = SQ_WQE_TASKSECT_46BITS; + } + + if (num_sge > 1) { + /* first wqebb contain bd0, and bd size is equal to sq wqebb + * size, so we use (num_sge - 1) as wanted weqbb_cnt + */ + hinic3_wq_get_multi_wqebbs(&txq->sq->wq, num_sge - 1, &tmp_pi, + &first_part_wqebbs, + &second_part_wqebbs, + &first_part_wqebbs_num); + wqe_combo->bds_head = first_part_wqebbs; + wqe_combo->bds_sec2 = second_part_wqebbs; + wqe_combo->first_bds_num = first_part_wqebbs_num; + } + + return hinic3_get_and_update_sq_owner(txq->sq, *curr_pi, + num_sge + !!offload); +} + +static void hinic3_prepare_sq_ctrl(struct hinic3_sq_wqe_combo *wqe_combo, + u32 queue_info, int nr_descs, u16 owner) +{ + struct hinic3_sq_wqe_desc *wqe_desc = wqe_combo->ctrl_bd0; + + if (wqe_combo->wqe_type == SQ_WQE_COMPACT_TYPE) { + wqe_desc->ctrl_len |= + SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) | + SQ_CTRL_SET(wqe_combo->wqe_type, EXTENDED) | + SQ_CTRL_SET(owner, OWNER); + + /* compact wqe queue_info will transfer to chip */ + wqe_desc->queue_info = 0; + return; + } + + wqe_desc->ctrl_len |= SQ_CTRL_SET(nr_descs, BUFDESC_NUM) | + SQ_CTRL_SET(wqe_combo->task_type, TASKSECT_LEN) | + SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) | + SQ_CTRL_SET(wqe_combo->wqe_type, EXTENDED) | + SQ_CTRL_SET(owner, OWNER); + + wqe_desc->queue_info = queue_info; + wqe_desc->queue_info |= SQ_CTRL_QUEUE_INFO_SET(1, UC); + + if (!SQ_CTRL_QUEUE_INFO_GET(wqe_desc->queue_info, MSS)) { + wqe_desc->queue_info |= + SQ_CTRL_QUEUE_INFO_SET(HINIC3_TX_MSS_DEFAULT, MSS); + } else if (SQ_CTRL_QUEUE_INFO_GET(wqe_desc->queue_info, MSS) < + HINIC3_TX_MSS_MIN) { + /* mss should not be less than 80 */ + wqe_desc->queue_info &= ~SQ_CTRL_QUEUE_INFO_MSS_MASK; + wqe_desc->queue_info |= + SQ_CTRL_QUEUE_INFO_SET(HINIC3_TX_MSS_MIN, MSS); + } +} + +static netdev_tx_t hinic3_send_one_skb(struct sk_buff *skb, + struct net_device *netdev, + struct hinic3_txq *txq) +{ + struct hinic3_sq_wqe_combo wqe_combo = {}; + struct hinic3_tx_info *tx_info; + struct hinic3_txq *tx_q = txq; + u32 offload, queue_info = 0; + struct hinic3_sq_task task; + u16 wqebb_cnt, num_sge; + u16 saved_wq_prod_idx; + u16 owner, pi = 0; + u8 saved_sq_owner; + int err; + + if (unlikely(skb->len < MIN_SKB_LEN)) { + if (skb_pad(skb, MIN_SKB_LEN - skb->len)) + goto err_out; + + skb->len = MIN_SKB_LEN; + } + + num_sge = skb_shinfo(skb)->nr_frags + 1; + /* assume normal wqe format + 1 wqebb for task info */ + wqebb_cnt = num_sge + 1; + + if (unlikely(hinic3_wq_free_wqebbs(&txq->sq->wq) < wqebb_cnt)) { + if (likely(wqebb_cnt > txq->tx_stop_thrs)) + txq->tx_stop_thrs = min(wqebb_cnt, txq->tx_start_thrs); + + netif_subqueue_try_stop(netdev, tx_q->sq->q_id, + hinic3_wq_free_wqebbs(&tx_q->sq->wq), + tx_q->tx_start_thrs); + + return NETDEV_TX_BUSY; + } + + offload = hinic3_tx_offload(skb, &task, &queue_info, txq); + if (unlikely(offload == HINIC3_TX_OFFLOAD_INVALID)) { + goto err_drop_pkt; + } else if (!offload) { + wqebb_cnt -= 1; + if (unlikely(num_sge == 1 && + skb->len > HINIC3_COMPACT_WQEE_SKB_MAX_LEN)) + goto err_drop_pkt; + } + + saved_wq_prod_idx = txq->sq->wq.prod_idx; + saved_sq_owner = txq->sq->owner; + + owner = hinic3_set_wqe_combo(txq, &wqe_combo, offload, num_sge, &pi); + if (offload) + *wqe_combo.task = task; + + tx_info = &txq->tx_info[pi]; + tx_info->skb = skb; + tx_info->wqebb_cnt = wqebb_cnt; + + err = 
hinic3_tx_map_skb(netdev, skb, txq, tx_info, &wqe_combo); + if (err) { + /* Rollback work queue to reclaim the wqebb we did not use */ + txq->sq->wq.prod_idx = saved_wq_prod_idx; + txq->sq->owner = saved_sq_owner; + goto err_drop_pkt; + } + + netdev_tx_sent_queue(netdev_get_tx_queue(netdev, txq->sq->q_id), + skb->len); + netif_subqueue_maybe_stop(netdev, tx_q->sq->q_id, + hinic3_wq_free_wqebbs(&tx_q->sq->wq), + tx_q->tx_stop_thrs, + tx_q->tx_start_thrs); + + hinic3_prepare_sq_ctrl(&wqe_combo, queue_info, num_sge, owner); + hinic3_write_db(txq->sq, 0, DB_CFLAG_DP_SQ, + hinic3_get_sq_local_pi(txq->sq)); + + return NETDEV_TX_OK; + +err_drop_pkt: + dev_kfree_skb_any(skb); + +err_out: + return NETDEV_TX_OK; +} + +netdev_tx_t hinic3_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + u16 q_id = skb_get_queue_mapping(skb); + + if (unlikely(!netif_carrier_ok(netdev))) + goto err_drop_pkt; + + if (unlikely(q_id >= nic_dev->q_params.num_qps)) + goto err_drop_pkt; + + return hinic3_send_one_skb(skb, netdev, &nic_dev->txqs[q_id]); + +err_drop_pkt: + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +static bool is_hw_complete_sq_process(struct hinic3_io_queue *sq) +{ + u16 sw_pi, hw_ci; + + sw_pi = hinic3_get_sq_local_pi(sq); + hw_ci = hinic3_get_sq_hw_ci(sq); + + return sw_pi == hw_ci; +} + +#define HINIC3_FLUSH_QUEUE_POLL_SLEEP_US 10000 +#define HINIC3_FLUSH_QUEUE_POLL_TIMEOUT_US 10000000 +static int hinic3_stop_sq(struct hinic3_txq *txq) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(txq->netdev); + int err, rc; + + err = read_poll_timeout(hinic3_force_drop_tx_pkt, rc, + is_hw_complete_sq_process(txq->sq) || rc, + HINIC3_FLUSH_QUEUE_POLL_SLEEP_US, + HINIC3_FLUSH_QUEUE_POLL_TIMEOUT_US, + true, nic_dev->hwdev); + if (rc) + return rc; + else + return err; +} + +/* packet transmission should be stopped before calling this function */ +void hinic3_flush_txqs(struct net_device *netdev) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + u16 qid; + int err; + + for (qid = 0; qid < nic_dev->q_params.num_qps; qid++) { + err = hinic3_stop_sq(&nic_dev->txqs[qid]); + netdev_tx_reset_subqueue(netdev, qid); + if (err) + netdev_err(netdev, "Failed to stop sq%u\n", qid); + } +} + +#define HINIC3_BDS_PER_SQ_WQEBB \ + (HINIC3_SQ_WQEBB_SIZE / sizeof(struct hinic3_sq_bufdesc)) + +bool hinic3_tx_poll(struct hinic3_txq *txq, int budget) +{ + struct net_device *netdev = txq->netdev; + u16 hw_ci, sw_ci, q_id = txq->sq->q_id; + struct hinic3_tx_info *tx_info; + struct hinic3_txq *tx_q = txq; + unsigned int bytes_compl = 0; + unsigned int pkts = 0; + u16 wqebb_cnt = 0; + + hw_ci = hinic3_get_sq_hw_ci(txq->sq); + dma_rmb(); + sw_ci = hinic3_get_sq_local_ci(txq->sq); + + do { + tx_info = &txq->tx_info[sw_ci]; + + /* Did all wqebb of this wqe complete? 
*/ + if (hw_ci == sw_ci || + ((hw_ci - sw_ci) & txq->q_mask) < tx_info->wqebb_cnt) + break; + + sw_ci = (sw_ci + tx_info->wqebb_cnt) & txq->q_mask; + net_prefetch(&txq->tx_info[sw_ci]); + + wqebb_cnt += tx_info->wqebb_cnt; + bytes_compl += tx_info->skb->len; + pkts++; + + hinic3_tx_unmap_skb(netdev, tx_info->skb, tx_info->dma_info); + napi_consume_skb(tx_info->skb, budget); + tx_info->skb = NULL; + } while (likely(pkts < HINIC3_TX_POLL_WEIGHT)); + + hinic3_wq_put_wqebbs(&txq->sq->wq, wqebb_cnt); + + netif_subqueue_completed_wake(netdev, q_id, pkts, bytes_compl, + hinic3_wq_free_wqebbs(&tx_q->sq->wq), + tx_q->tx_start_thrs); + + return pkts == HINIC3_TX_POLL_WEIGHT; +} diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_tx.h b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.h new file mode 100644 index 000000000000..9e505cc19dd5 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.h @@ -0,0 +1,135 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */ + +#ifndef _HINIC3_TX_H_ +#define _HINIC3_TX_H_ + +#include <linux/bitops.h> +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <linux/netdevice.h> +#include <net/checksum.h> + +#define VXLAN_OFFLOAD_PORT_LE cpu_to_be16(4789) +#define TCP_HDR_DATA_OFF_UNIT_SHIFT 2 +#define TRANSPORT_OFFSET(l4_hdr, skb) ((l4_hdr) - (skb)->data) + +#define HINIC3_COMPACT_WQEE_SKB_MAX_LEN 16383 +#define HINIC3_TX_POLL_WEIGHT 64 +#define HINIC3_DEFAULT_STOP_THRS 6 +#define HINIC3_DEFAULT_START_THRS 24 + +enum sq_wqe_data_format { + SQ_NORMAL_WQE = 0, +}; + +enum sq_wqe_ec_type { + SQ_WQE_COMPACT_TYPE = 0, + SQ_WQE_EXTENDED_TYPE = 1, +}; + +enum sq_wqe_tasksect_len_type { + SQ_WQE_TASKSECT_46BITS = 0, + SQ_WQE_TASKSECT_16BYTES = 1, +}; + +enum hinic3_tx_offload_type { + HINIC3_TX_OFFLOAD_TSO = BIT(0), + HINIC3_TX_OFFLOAD_CSUM = BIT(1), + HINIC3_TX_OFFLOAD_VLAN = BIT(2), + HINIC3_TX_OFFLOAD_INVALID = BIT(3), + HINIC3_TX_OFFLOAD_ESP = BIT(4), +}; + +#define SQ_CTRL_BUFDESC_NUM_MASK GENMASK(26, 19) +#define SQ_CTRL_TASKSECT_LEN_MASK BIT(27) +#define SQ_CTRL_DATA_FORMAT_MASK BIT(28) +#define SQ_CTRL_EXTENDED_MASK BIT(30) +#define SQ_CTRL_OWNER_MASK BIT(31) +#define SQ_CTRL_SET(val, member) \ + FIELD_PREP(SQ_CTRL_##member##_MASK, val) + +#define SQ_CTRL_QUEUE_INFO_PLDOFF_MASK GENMASK(9, 2) +#define SQ_CTRL_QUEUE_INFO_UFO_MASK BIT(10) +#define SQ_CTRL_QUEUE_INFO_TSO_MASK BIT(11) +#define SQ_CTRL_QUEUE_INFO_MSS_MASK GENMASK(26, 13) +#define SQ_CTRL_QUEUE_INFO_UC_MASK BIT(28) + +#define SQ_CTRL_QUEUE_INFO_SET(val, member) \ + FIELD_PREP(SQ_CTRL_QUEUE_INFO_##member##_MASK, val) +#define SQ_CTRL_QUEUE_INFO_GET(val, member) \ + FIELD_GET(SQ_CTRL_QUEUE_INFO_##member##_MASK, val) + +#define SQ_CTRL_MAX_PLDOFF 221 + +#define SQ_TASK_INFO0_TUNNEL_FLAG_MASK BIT(19) +#define SQ_TASK_INFO0_INNER_L4_EN_MASK BIT(24) +#define SQ_TASK_INFO0_INNER_L3_EN_MASK BIT(25) +#define SQ_TASK_INFO0_OUT_L4_EN_MASK BIT(27) +#define SQ_TASK_INFO0_OUT_L3_EN_MASK BIT(28) +#define SQ_TASK_INFO0_SET(val, member) \ + FIELD_PREP(SQ_TASK_INFO0_##member##_MASK, val) + +#define SQ_TASK_INFO3_VLAN_TAG_MASK GENMASK(15, 0) +#define SQ_TASK_INFO3_VLAN_TPID_MASK GENMASK(18, 16) +#define SQ_TASK_INFO3_VLAN_TAG_VALID_MASK BIT(19) +#define SQ_TASK_INFO3_SET(val, member) \ + FIELD_PREP(SQ_TASK_INFO3_##member##_MASK, val) + +struct hinic3_sq_wqe_desc { + u32 ctrl_len; + u32 queue_info; + u32 hi_addr; + u32 lo_addr; +}; + +struct hinic3_sq_task { + u32 pkt_info0; + u32 ip_identify; + u32 rsvd; + u32 vlan_offload; +}; + +struct 
hinic3_sq_wqe_combo { + struct hinic3_sq_wqe_desc *ctrl_bd0; + struct hinic3_sq_task *task; + struct hinic3_sq_bufdesc *bds_head; + struct hinic3_sq_bufdesc *bds_sec2; + u16 first_bds_num; + u32 wqe_type; + u32 task_type; +}; + +struct hinic3_dma_info { + dma_addr_t dma; + u32 len; +}; + +struct hinic3_tx_info { + struct sk_buff *skb; + u16 wqebb_cnt; + struct hinic3_dma_info *dma_info; +}; + +struct hinic3_txq { + struct net_device *netdev; + struct device *dev; + + u16 q_id; + u16 tx_stop_thrs; + u16 tx_start_thrs; + u32 q_mask; + u32 q_depth; + + struct hinic3_tx_info *tx_info; + struct hinic3_io_queue *sq; +} ____cacheline_aligned; + +int hinic3_alloc_txqs(struct net_device *netdev); +void hinic3_free_txqs(struct net_device *netdev); + +netdev_tx_t hinic3_xmit_frame(struct sk_buff *skb, struct net_device *netdev); +bool hinic3_tx_poll(struct hinic3_txq *txq, int budget); +void hinic3_flush_txqs(struct net_device *netdev); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_wq.c b/drivers/net/ethernet/huawei/hinic3/hinic3_wq.c new file mode 100644 index 000000000000..2ac7efcd1365 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_wq.c @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. + +#include <linux/dma-mapping.h> + +#include "hinic3_hwdev.h" +#include "hinic3_wq.h" + +void hinic3_wq_get_multi_wqebbs(struct hinic3_wq *wq, + u16 num_wqebbs, u16 *prod_idx, + struct hinic3_sq_bufdesc **first_part_wqebbs, + struct hinic3_sq_bufdesc **second_part_wqebbs, + u16 *first_part_wqebbs_num) +{ + u32 idx, remaining; + + idx = wq->prod_idx & wq->idx_mask; + wq->prod_idx += num_wqebbs; + *prod_idx = idx; + *first_part_wqebbs = get_q_element(&wq->qpages, idx, &remaining); + if (likely(remaining >= num_wqebbs)) { + *first_part_wqebbs_num = num_wqebbs; + *second_part_wqebbs = NULL; + } else { + *first_part_wqebbs_num = remaining; + idx += remaining; + *second_part_wqebbs = get_q_element(&wq->qpages, idx, NULL); + } +} diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_wq.h b/drivers/net/ethernet/huawei/hinic3/hinic3_wq.h new file mode 100644 index 000000000000..ab37893efd7e --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_wq.h @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */ + +#ifndef _HINIC3_WQ_H_ +#define _HINIC3_WQ_H_ + +#include <linux/io.h> + +#include "hinic3_queue_common.h" + +struct hinic3_sq_bufdesc { + /* 31-bits Length, L2NIC only uses length[17:0] */ + u32 len; + u32 rsvd; + u32 hi_addr; + u32 lo_addr; +}; + +/* Work queue is used to submit elements (tx, rx, cmd) to hw. + * Driver is the producer that advances prod_idx. cons_idx is advanced when + * HW reports completions of previously submitted elements. + */ +struct hinic3_wq { + struct hinic3_queue_pages qpages; + /* Unmasked producer/consumer indices that are advanced to natural + * integer overflow regardless of queue depth. + */ + u16 cons_idx; + u16 prod_idx; + + u32 q_depth; + u16 idx_mask; + + /* Work Queue (logical WQEBB array) is mapped to hw via Chip Logical + * Address (CLA) using 1 of 2 levels: + * level 0 - direct mapping of single wq page + * level 1 - indirect mapping of multiple pages via additional page + * table. + * When wq uses level 1, wq_block will hold the allocated indirection + * table. 
+ */ + dma_addr_t wq_block_paddr; + __be64 *wq_block_vaddr; +} ____cacheline_aligned; + +/* Get number of elements in work queue that are in-use. */ +static inline u16 hinic3_wq_get_used(const struct hinic3_wq *wq) +{ + return READ_ONCE(wq->prod_idx) - READ_ONCE(wq->cons_idx); +} + +static inline u16 hinic3_wq_free_wqebbs(struct hinic3_wq *wq) +{ + /* Don't allow queue to become completely full, report (free - 1). */ + return wq->q_depth - hinic3_wq_get_used(wq) - 1; +} + +static inline void *hinic3_wq_get_one_wqebb(struct hinic3_wq *wq, u16 *pi) +{ + *pi = wq->prod_idx & wq->idx_mask; + wq->prod_idx++; + return get_q_element(&wq->qpages, *pi, NULL); +} + +static inline void hinic3_wq_put_wqebbs(struct hinic3_wq *wq, u16 num_wqebbs) +{ + wq->cons_idx += num_wqebbs; +} + +void hinic3_wq_get_multi_wqebbs(struct hinic3_wq *wq, + u16 num_wqebbs, u16 *prod_idx, + struct hinic3_sq_bufdesc **first_part_wqebbs, + struct hinic3_sq_bufdesc **second_part_wqebbs, + u16 *first_part_wqebbs_num); + +#endif diff --git a/drivers/net/ethernet/ibm/Kconfig b/drivers/net/ethernet/ibm/Kconfig index c0c112d95b89..4f4b23465c47 100644 --- a/drivers/net/ethernet/ibm/Kconfig +++ b/drivers/net/ethernet/ibm/Kconfig @@ -27,6 +27,19 @@ config IBMVETH To compile this driver as a module, choose M here. The module will be called ibmveth. +config IBMVETH_KUNIT_TEST + bool "KUnit test for IBM LAN Virtual Ethernet support" if !KUNIT_ALL_TESTS + depends on KUNIT + depends on KUNIT=y && IBMVETH=y + default KUNIT_ALL_TESTS + help + This builds unit tests for the IBM LAN Virtual Ethernet driver. + + For more information on KUnit and unit tests in general, please refer + to the KUnit documentation in Documentation/dev-tools/kunit/. + + If unsure, say N. + source "drivers/net/ethernet/ibm/emac/Kconfig" config EHEA diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 04192190beba..24046fe16634 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -39,8 +39,6 @@ #include "ibmveth.h" static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance); -static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter, - bool reuse); static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev); static struct kobj_type ktype_veth_pool; @@ -231,7 +229,10 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, index = pool->free_map[free_index]; skb = NULL; - BUG_ON(index == IBM_VETH_INVALID_MAP); + if (WARN_ON(index == IBM_VETH_INVALID_MAP)) { + schedule_work(&adapter->work); + goto bad_index_failure; + } /* are we allocating a new buffer or recycling an old one */ if (pool->skbuff[index]) @@ -300,6 +301,7 @@ failure: DMA_FROM_DEVICE); dev_kfree_skb_any(pool->skbuff[index]); pool->skbuff[index] = NULL; +bad_index_failure: adapter->replenish_add_buff_failure++; mb(); @@ -370,20 +372,36 @@ static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter, } } -/* remove a buffer from a pool */ -static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter, - u64 correlator, bool reuse) +/** + * ibmveth_remove_buffer_from_pool - remove a buffer from a pool + * @adapter: adapter instance + * @correlator: identifies pool and index + * @reuse: whether to reuse buffer + * + * Return: + * * %0 - success + * * %-EINVAL - correlator maps to pool or index out of range + * * %-EFAULT - pool and index map to null skb + */ +static int ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter, + u64 correlator, bool reuse) { 
unsigned int pool = correlator >> 32; unsigned int index = correlator & 0xffffffffUL; unsigned int free_index; struct sk_buff *skb; - BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS); - BUG_ON(index >= adapter->rx_buff_pool[pool].size); + if (WARN_ON(pool >= IBMVETH_NUM_BUFF_POOLS) || + WARN_ON(index >= adapter->rx_buff_pool[pool].size)) { + schedule_work(&adapter->work); + return -EINVAL; + } skb = adapter->rx_buff_pool[pool].skbuff[index]; - BUG_ON(skb == NULL); + if (WARN_ON(!skb)) { + schedule_work(&adapter->work); + return -EFAULT; + } /* if we are going to reuse the buffer then keep the pointers around * but mark index as available. replenish will see the skb pointer and @@ -411,6 +429,8 @@ static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter, mb(); atomic_dec(&(adapter->rx_buff_pool[pool].available)); + + return 0; } /* get the current buffer on the rx queue */ @@ -420,24 +440,44 @@ static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *ada unsigned int pool = correlator >> 32; unsigned int index = correlator & 0xffffffffUL; - BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS); - BUG_ON(index >= adapter->rx_buff_pool[pool].size); + if (WARN_ON(pool >= IBMVETH_NUM_BUFF_POOLS) || + WARN_ON(index >= adapter->rx_buff_pool[pool].size)) { + schedule_work(&adapter->work); + return NULL; + } return adapter->rx_buff_pool[pool].skbuff[index]; } -static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter, - bool reuse) +/** + * ibmveth_rxq_harvest_buffer - Harvest buffer from pool + * + * @adapter: pointer to adapter + * @reuse: whether to reuse buffer + * + * Context: called from ibmveth_poll + * + * Return: + * * %0 - success + * * other - non-zero return from ibmveth_remove_buffer_from_pool + */ +static int ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter, + bool reuse) { u64 cor; + int rc; cor = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator; - ibmveth_remove_buffer_from_pool(adapter, cor, reuse); + rc = ibmveth_remove_buffer_from_pool(adapter, cor, reuse); + if (unlikely(rc)) + return rc; if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) { adapter->rx_queue.index = 0; adapter->rx_queue.toggle = !adapter->rx_queue.toggle; } + + return 0; } static void ibmveth_free_tx_ltb(struct ibmveth_adapter *adapter, int idx) @@ -709,6 +749,35 @@ static int ibmveth_close(struct net_device *netdev) return 0; } +/** + * ibmveth_reset - Handle scheduled reset work + * + * @w: pointer to work_struct embedded in adapter structure + * + * Context: This routine acquires rtnl_mutex and disables its NAPI through + * ibmveth_close. It can't be called directly in a context that has + * already acquired rtnl_mutex or disabled its NAPI, or directly from + * a poll routine. 
+ * + * Return: void + */ +static void ibmveth_reset(struct work_struct *w) +{ + struct ibmveth_adapter *adapter = container_of(w, struct ibmveth_adapter, work); + struct net_device *netdev = adapter->netdev; + + netdev_dbg(netdev, "reset starting\n"); + + rtnl_lock(); + + dev_close(adapter->netdev); + dev_open(adapter->netdev, NULL); + + rtnl_unlock(); + + netdev_dbg(netdev, "reset complete\n"); +} + static int ibmveth_set_link_ksettings(struct net_device *dev, const struct ethtool_link_ksettings *cmd) { @@ -1324,7 +1393,8 @@ restart_poll: wmb(); /* suggested by larson1 */ adapter->rx_invalid_buffer++; netdev_dbg(netdev, "recycling invalid buffer\n"); - ibmveth_rxq_harvest_buffer(adapter, true); + if (unlikely(ibmveth_rxq_harvest_buffer(adapter, true))) + break; } else { struct sk_buff *skb, *new_skb; int length = ibmveth_rxq_frame_length(adapter); @@ -1334,6 +1404,8 @@ restart_poll: __sum16 iph_check = 0; skb = ibmveth_rxq_get_buffer(adapter); + if (unlikely(!skb)) + break; /* if the large packet bit is set in the rx queue * descriptor, the mss will be written by PHYP eight @@ -1357,10 +1429,12 @@ restart_poll: if (rx_flush) ibmveth_flush_buffer(skb->data, length + offset); - ibmveth_rxq_harvest_buffer(adapter, true); + if (unlikely(ibmveth_rxq_harvest_buffer(adapter, true))) + break; skb = new_skb; } else { - ibmveth_rxq_harvest_buffer(adapter, false); + if (unlikely(ibmveth_rxq_harvest_buffer(adapter, false))) + break; skb_reserve(skb, offset); } @@ -1407,7 +1481,10 @@ restart_poll: * then check once more to make sure we are done. */ lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_ENABLE); - BUG_ON(lpar_rc != H_SUCCESS); + if (WARN_ON(lpar_rc != H_SUCCESS)) { + schedule_work(&adapter->work); + goto out; + } if (ibmveth_rxq_pending_buffer(adapter) && napi_schedule(napi)) { lpar_rc = h_vio_signal(adapter->vdev->unit_address, @@ -1428,7 +1505,7 @@ static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance) if (napi_schedule_prep(&adapter->napi)) { lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE); - BUG_ON(lpar_rc != H_SUCCESS); + WARN_ON(lpar_rc != H_SUCCESS); __napi_schedule(&adapter->napi); } return IRQ_HANDLED; @@ -1670,6 +1747,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) adapter->vdev = dev; adapter->netdev = netdev; + INIT_WORK(&adapter->work, ibmveth_reset); adapter->mcastFilterSize = be32_to_cpu(*mcastFilterSize_p); ibmveth_init_link_settings(netdev); @@ -1762,6 +1840,8 @@ static void ibmveth_remove(struct vio_dev *dev) struct ibmveth_adapter *adapter = netdev_priv(netdev); int i; + cancel_work_sync(&adapter->work); + for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) kobject_put(&adapter->rx_buff_pool[i].kobj); @@ -1791,6 +1871,26 @@ static ssize_t veth_pool_show(struct kobject *kobj, return 0; } +/** + * veth_pool_store - sysfs store handler for pool attributes + * @kobj: kobject embedded in pool + * @attr: attribute being changed + * @buf: value being stored + * @count: length of @buf in bytes + * + * Stores new value in pool attribute. Verifies the range of the new value for + * size and buff_size. Verifies that at least one pool remains available to + * receive MTU-sized packets. + * + * Context: Process context. + * Takes and releases rtnl_mutex to ensure correct ordering of close + * and open calls. 
+ * Return: + * * %-EPERM - Not allowed to disabled all MTU-sized buffer pools + * * %-EINVAL - New pool size or buffer size is out of range + * * count - Return count for success + * * other - Return value from a failed ibmveth_open call + */ static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { @@ -1800,28 +1900,30 @@ static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr, struct net_device *netdev = dev_get_drvdata(kobj_to_dev(kobj->parent)); struct ibmveth_adapter *adapter = netdev_priv(netdev); long value = simple_strtol(buf, NULL, 10); + bool change = false; + u32 newbuff_size; + u32 oldbuff_size; + int newactive; + int oldactive; + u32 newsize; + u32 oldsize; long rc; rtnl_lock(); + oldbuff_size = pool->buff_size; + oldactive = pool->active; + oldsize = pool->size; + + newbuff_size = oldbuff_size; + newactive = oldactive; + newsize = oldsize; + if (attr == &veth_active_attr) { - if (value && !pool->active) { - if (netif_running(netdev)) { - if (ibmveth_alloc_buffer_pool(pool)) { - netdev_err(netdev, - "unable to alloc pool\n"); - rc = -ENOMEM; - goto unlock_err; - } - pool->active = 1; - ibmveth_close(netdev); - rc = ibmveth_open(netdev); - if (rc) - goto unlock_err; - } else { - pool->active = 1; - } - } else if (!value && pool->active) { + if (value && !oldactive) { + newactive = 1; + change = true; + } else if (!value && oldactive) { int mtu = netdev->mtu + IBMVETH_BUFF_OH; int i; /* Make sure there is a buffer pool with buffers that @@ -1841,43 +1943,44 @@ static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr, goto unlock_err; } - if (netif_running(netdev)) { - ibmveth_close(netdev); - pool->active = 0; - rc = ibmveth_open(netdev); - if (rc) - goto unlock_err; - } - pool->active = 0; + newactive = 0; + change = true; } } else if (attr == &veth_num_attr) { if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) { rc = -EINVAL; goto unlock_err; - } else { - if (netif_running(netdev)) { - ibmveth_close(netdev); - pool->size = value; - rc = ibmveth_open(netdev); - if (rc) - goto unlock_err; - } else { - pool->size = value; - } + } + if (value != oldsize) { + newsize = value; + change = true; } } else if (attr == &veth_size_attr) { if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) { rc = -EINVAL; goto unlock_err; - } else { - if (netif_running(netdev)) { - ibmveth_close(netdev); - pool->buff_size = value; - rc = ibmveth_open(netdev); - if (rc) - goto unlock_err; - } else { - pool->buff_size = value; + } + if (value != oldbuff_size) { + newbuff_size = value; + change = true; + } + } + + if (change) { + if (netif_running(netdev)) + ibmveth_close(netdev); + + pool->active = newactive; + pool->buff_size = newbuff_size; + pool->size = newsize; + + if (netif_running(netdev)) { + rc = ibmveth_open(netdev); + if (rc) { + pool->active = oldactive; + pool->buff_size = oldbuff_size; + pool->size = oldsize; + goto unlock_err; } } } @@ -1962,3 +2065,132 @@ static void __exit ibmveth_module_exit(void) module_init(ibmveth_module_init); module_exit(ibmveth_module_exit); + +#ifdef CONFIG_IBMVETH_KUNIT_TEST +#include <kunit/test.h> + +/** + * ibmveth_reset_kunit - reset routine for running in KUnit environment + * + * @w: pointer to work_struct embedded in adapter structure + * + * Context: Called in the KUnit environment. Does nothing. 
+ * + * Return: void + */ +static void ibmveth_reset_kunit(struct work_struct *w) +{ + netdev_dbg(NULL, "reset_kunit starting\n"); + netdev_dbg(NULL, "reset_kunit complete\n"); +} + +/** + * ibmveth_remove_buffer_from_pool_test - unit test for some of + * ibmveth_remove_buffer_from_pool + * @test: pointer to kunit structure + * + * Tests the error returns from ibmveth_remove_buffer_from_pool. + * ibmveth_remove_buffer_from_pool also calls WARN_ON, so dmesg should be + * checked to see that these warnings happened. + * + * Return: void + */ +static void ibmveth_remove_buffer_from_pool_test(struct kunit *test) +{ + struct ibmveth_adapter *adapter = kunit_kzalloc(test, sizeof(*adapter), GFP_KERNEL); + struct ibmveth_buff_pool *pool; + u64 correlator; + + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, adapter); + + INIT_WORK(&adapter->work, ibmveth_reset_kunit); + + /* Set sane values for buffer pools */ + for (int i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) + ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i, + pool_count[i], pool_size[i], + pool_active[i]); + + pool = &adapter->rx_buff_pool[0]; + pool->skbuff = kunit_kcalloc(test, pool->size, sizeof(void *), GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pool->skbuff); + + correlator = ((u64)IBMVETH_NUM_BUFF_POOLS << 32) | 0; + KUNIT_EXPECT_EQ(test, -EINVAL, ibmveth_remove_buffer_from_pool(adapter, correlator, false)); + KUNIT_EXPECT_EQ(test, -EINVAL, ibmveth_remove_buffer_from_pool(adapter, correlator, true)); + + correlator = ((u64)0 << 32) | adapter->rx_buff_pool[0].size; + KUNIT_EXPECT_EQ(test, -EINVAL, ibmveth_remove_buffer_from_pool(adapter, correlator, false)); + KUNIT_EXPECT_EQ(test, -EINVAL, ibmveth_remove_buffer_from_pool(adapter, correlator, true)); + + correlator = (u64)0 | 0; + pool->skbuff[0] = NULL; + KUNIT_EXPECT_EQ(test, -EFAULT, ibmveth_remove_buffer_from_pool(adapter, correlator, false)); + KUNIT_EXPECT_EQ(test, -EFAULT, ibmveth_remove_buffer_from_pool(adapter, correlator, true)); + + flush_work(&adapter->work); +} + +/** + * ibmveth_rxq_get_buffer_test - unit test for ibmveth_rxq_get_buffer + * @test: pointer to kunit structure + * + * Tests ibmveth_rxq_get_buffer. ibmveth_rxq_get_buffer also calls WARN_ON for + * the NULL returns, so dmesg should be checked to see that these warnings + * happened. 
+ * + * Return: void + */ +static void ibmveth_rxq_get_buffer_test(struct kunit *test) +{ + struct ibmveth_adapter *adapter = kunit_kzalloc(test, sizeof(*adapter), GFP_KERNEL); + struct sk_buff *skb = kunit_kzalloc(test, sizeof(*skb), GFP_KERNEL); + struct ibmveth_buff_pool *pool; + + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, adapter); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb); + + INIT_WORK(&adapter->work, ibmveth_reset_kunit); + + adapter->rx_queue.queue_len = 1; + adapter->rx_queue.index = 0; + adapter->rx_queue.queue_addr = kunit_kzalloc(test, sizeof(struct ibmveth_rx_q_entry), + GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, adapter->rx_queue.queue_addr); + + /* Set sane values for buffer pools */ + for (int i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) + ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i, + pool_count[i], pool_size[i], + pool_active[i]); + + pool = &adapter->rx_buff_pool[0]; + pool->skbuff = kunit_kcalloc(test, pool->size, sizeof(void *), GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pool->skbuff); + + adapter->rx_queue.queue_addr[0].correlator = (u64)IBMVETH_NUM_BUFF_POOLS << 32 | 0; + KUNIT_EXPECT_PTR_EQ(test, NULL, ibmveth_rxq_get_buffer(adapter)); + + adapter->rx_queue.queue_addr[0].correlator = (u64)0 << 32 | adapter->rx_buff_pool[0].size; + KUNIT_EXPECT_PTR_EQ(test, NULL, ibmveth_rxq_get_buffer(adapter)); + + pool->skbuff[0] = skb; + adapter->rx_queue.queue_addr[0].correlator = (u64)0 << 32 | 0; + KUNIT_EXPECT_PTR_EQ(test, skb, ibmveth_rxq_get_buffer(adapter)); + + flush_work(&adapter->work); +} + +static struct kunit_case ibmveth_test_cases[] = { + KUNIT_CASE(ibmveth_remove_buffer_from_pool_test), + KUNIT_CASE(ibmveth_rxq_get_buffer_test), + {} +}; + +static struct kunit_suite ibmveth_test_suite = { + .name = "ibmveth-kunit-test", + .test_cases = ibmveth_test_cases, +}; + +kunit_test_suite(ibmveth_test_suite); +#endif diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h index 8468e2c59d7a..b0a2460ec9f9 100644 --- a/drivers/net/ethernet/ibm/ibmveth.h +++ b/drivers/net/ethernet/ibm/ibmveth.h @@ -134,38 +134,39 @@ struct ibmveth_rx_q { }; struct ibmveth_adapter { - struct vio_dev *vdev; - struct net_device *netdev; - struct napi_struct napi; - unsigned int mcastFilterSize; - void * buffer_list_addr; - void * filter_list_addr; - void *tx_ltb_ptr[IBMVETH_MAX_QUEUES]; - unsigned int tx_ltb_size; - dma_addr_t tx_ltb_dma[IBMVETH_MAX_QUEUES]; - dma_addr_t buffer_list_dma; - dma_addr_t filter_list_dma; - struct ibmveth_buff_pool rx_buff_pool[IBMVETH_NUM_BUFF_POOLS]; - struct ibmveth_rx_q rx_queue; - int rx_csum; - int large_send; - bool is_active_trunk; - - u64 fw_ipv6_csum_support; - u64 fw_ipv4_csum_support; - u64 fw_large_send_support; - /* adapter specific stats */ - u64 replenish_task_cycles; - u64 replenish_no_mem; - u64 replenish_add_buff_failure; - u64 replenish_add_buff_success; - u64 rx_invalid_buffer; - u64 rx_no_buffer; - u64 tx_map_failed; - u64 tx_send_failed; - u64 tx_large_packets; - u64 rx_large_packets; - /* Ethtool settings */ + struct vio_dev *vdev; + struct net_device *netdev; + struct napi_struct napi; + struct work_struct work; + unsigned int mcastFilterSize; + void *buffer_list_addr; + void *filter_list_addr; + void *tx_ltb_ptr[IBMVETH_MAX_QUEUES]; + unsigned int tx_ltb_size; + dma_addr_t tx_ltb_dma[IBMVETH_MAX_QUEUES]; + dma_addr_t buffer_list_dma; + dma_addr_t filter_list_dma; + struct ibmveth_buff_pool rx_buff_pool[IBMVETH_NUM_BUFF_POOLS]; + struct ibmveth_rx_q rx_queue; + int rx_csum; + int large_send; + bool 
is_active_trunk; + + u64 fw_ipv6_csum_support; + u64 fw_ipv4_csum_support; + u64 fw_large_send_support; + /* adapter specific stats */ + u64 replenish_task_cycles; + u64 replenish_no_mem; + u64 replenish_add_buff_failure; + u64 replenish_add_buff_success; + u64 rx_invalid_buffer; + u64 rx_no_buffer; + u64 tx_map_failed; + u64 tx_send_failed; + u64 tx_large_packets; + u64 rx_large_packets; + /* Ethtool settings */ u8 duplex; u32 speed; }; diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index 1640d2f27833..5a331c1c76cb 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -147,6 +147,8 @@ config IXGBE depends on PCI depends on PTP_1588_CLOCK_OPTIONAL select MDIO + select NET_DEVLINK + select PLDMFW select PHYLIB help This driver supports Intel(R) 10GbE PCI Express family of @@ -367,6 +369,7 @@ config IGC default n depends on PCI depends on PTP_1588_CLOCK_OPTIONAL + depends on ETHTOOL_NETLINK help This driver supports Intel(R) Ethernet Controller I225-LM/I225-V family of adapters. diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index ba9c19e6994c..952898151565 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -319,7 +319,7 @@ struct e1000_adapter { u16 tx_ring_count; u16 rx_ring_count; - struct hwtstamp_config hwtstamp_config; + struct kernel_hwtstamp_config hwtstamp_config; struct delayed_work systim_overflow_work; struct sk_buff *tx_hwtstamp_skb; unsigned long tx_hwtstamp_start; diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 8ebcb6a7d608..e0f492a6723f 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -3574,6 +3574,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca) * e1000e_config_hwtstamp - configure the hwtstamp registers and enable/disable * @adapter: board private structure * @config: timestamp configuration + * @extack: netlink extended ACK for error report * * Outgoing time stamping can be enabled and disabled. Play nice and * disable it when requested, although it shouldn't cause any overhead @@ -3587,7 +3588,8 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca) * exception of "all V2 events regardless of level 2 or 4". 
**/ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter, - struct hwtstamp_config *config) + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) { struct e1000_hw *hw = &adapter->hw; u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED; @@ -3598,8 +3600,10 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter, bool is_l2 = false; u32 regval; - if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP)) + if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP)) { + NL_SET_ERR_MSG(extack, "No HW timestamp support"); return -EINVAL; + } switch (config->tx_type) { case HWTSTAMP_TX_OFF: @@ -3608,6 +3612,7 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter, case HWTSTAMP_TX_ON: break; default: + NL_SET_ERR_MSG(extack, "Unsupported TX HW timestamp type"); return -ERANGE; } @@ -3681,6 +3686,7 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter, config->rx_filter = HWTSTAMP_FILTER_ALL; break; default: + NL_SET_ERR_MSG(extack, "Unsupported RX HW timestamp filter"); return -ERANGE; } @@ -3693,7 +3699,8 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter, ew32(TSYNCTXCTL, regval); if ((er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) != (regval & E1000_TSYNCTXCTL_ENABLED)) { - e_err("Timesync Tx Control register not set as expected\n"); + NL_SET_ERR_MSG(extack, + "Timesync Tx Control register not set as expected"); return -EAGAIN; } @@ -3706,7 +3713,8 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter, E1000_TSYNCRXCTL_TYPE_MASK)) != (regval & (E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK))) { - e_err("Timesync Rx Control register not set as expected\n"); + NL_SET_ERR_MSG(extack, + "Timesync Rx Control register not set as expected"); return -EAGAIN; } @@ -3901,6 +3909,7 @@ static void e1000e_systim_reset(struct e1000_adapter *adapter) { struct ptp_clock_info *info = &adapter->ptp_clock_info; struct e1000_hw *hw = &adapter->hw; + struct netlink_ext_ack extack = {}; unsigned long flags; u32 timinca; s32 ret_val; @@ -3932,7 +3941,12 @@ static void e1000e_systim_reset(struct e1000_adapter *adapter) spin_unlock_irqrestore(&adapter->systim_lock, flags); /* restore the previous hwtstamp configuration settings */ - e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config); + ret_val = e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config, + &extack); + if (ret_val) { + if (extack._msg) + e_err("%s\n", extack._msg); + } } /** @@ -6079,8 +6093,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) return 0; } -static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, - int cmd) +static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { struct e1000_adapter *adapter = netdev_priv(netdev); struct mii_ioctl_data *data = if_mii(ifr); @@ -6140,7 +6153,8 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, /** * e1000e_hwtstamp_set - control hardware time stamping * @netdev: network interface device structure - * @ifr: interface request + * @config: timestamp configuration + * @extack: netlink extended ACK report * * Outgoing time stamping can be enabled and disabled. Play nice and * disable it when requested, although it shouldn't cause any overhead @@ -6153,20 +6167,18 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, * specified. Matching the kind of event packet is not supported, with the * exception of "all V2 events regardless of level 2 or 4". 
**/ -static int e1000e_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr) +static int e1000e_hwtstamp_set(struct net_device *netdev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) { struct e1000_adapter *adapter = netdev_priv(netdev); - struct hwtstamp_config config; int ret_val; - if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) - return -EFAULT; - - ret_val = e1000e_config_hwtstamp(adapter, &config); + ret_val = e1000e_config_hwtstamp(adapter, config, extack); if (ret_val) return ret_val; - switch (config.rx_filter) { + switch (config->rx_filter) { case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: case HWTSTAMP_FILTER_PTP_V2_SYNC: @@ -6178,38 +6190,23 @@ static int e1000e_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr) * by hardware so notify the caller the requested packets plus * some others are time stamped. */ - config.rx_filter = HWTSTAMP_FILTER_SOME; + config->rx_filter = HWTSTAMP_FILTER_SOME; break; default: break; } - return copy_to_user(ifr->ifr_data, &config, - sizeof(config)) ? -EFAULT : 0; + return 0; } -static int e1000e_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr) +static int e1000e_hwtstamp_get(struct net_device *netdev, + struct kernel_hwtstamp_config *kernel_config) { struct e1000_adapter *adapter = netdev_priv(netdev); - return copy_to_user(ifr->ifr_data, &adapter->hwtstamp_config, - sizeof(adapter->hwtstamp_config)) ? -EFAULT : 0; -} + *kernel_config = adapter->hwtstamp_config; -static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) -{ - switch (cmd) { - case SIOCGMIIPHY: - case SIOCGMIIREG: - case SIOCSMIIREG: - return e1000_mii_ioctl(netdev, ifr, cmd); - case SIOCSHWTSTAMP: - return e1000e_hwtstamp_set(netdev, ifr); - case SIOCGHWTSTAMP: - return e1000e_hwtstamp_get(netdev, ifr); - default: - return -EOPNOTSUPP; - } + return 0; } static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc) @@ -7346,9 +7343,11 @@ static const struct net_device_ops e1000e_netdev_ops = { #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = e1000_netpoll, #endif - .ndo_set_features = e1000_set_features, - .ndo_fix_features = e1000_fix_features, + .ndo_set_features = e1000_set_features, + .ndo_fix_features = e1000_fix_features, .ndo_features_check = passthru_features_check, + .ndo_hwtstamp_get = e1000e_hwtstamp_get, + .ndo_hwtstamp_set = e1000e_hwtstamp_set, }; /** diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 370b4bddee44..b11c35e307ca 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -817,10 +817,11 @@ int i40e_pf_reset(struct i40e_hw *hw) void i40e_clear_hw(struct i40e_hw *hw) { u32 num_queues, base_queue; - u32 num_pf_int; - u32 num_vf_int; + s32 num_pf_int; + s32 num_vf_int; u32 num_vfs; - u32 i, j; + s32 i; + u32 j; u32 val; u32 eol = 0x7ff; diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink.c b/drivers/net/ethernet/intel/ice/devlink/devlink.c index fcb199efbea5..4af60e2f37df 100644 --- a/drivers/net/ethernet/intel/ice/devlink/devlink.c +++ b/drivers/net/ethernet/intel/ice/devlink/devlink.c @@ -1339,8 +1339,13 @@ ice_devlink_enable_roce_get(struct devlink *devlink, u32 id, struct devlink_param_gset_ctx *ctx) { struct ice_pf *pf = devlink_priv(devlink); + struct iidc_rdma_core_dev_info *cdev; - ctx->val.vbool = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ? 
true : false; + cdev = pf->cdev_info; + if (!cdev) + return -ENODEV; + + ctx->val.vbool = !!(cdev->rdma_protocol & IIDC_RDMA_PROTOCOL_ROCEV2); return 0; } @@ -1350,19 +1355,24 @@ static int ice_devlink_enable_roce_set(struct devlink *devlink, u32 id, struct netlink_ext_ack *extack) { struct ice_pf *pf = devlink_priv(devlink); + struct iidc_rdma_core_dev_info *cdev; bool roce_ena = ctx->val.vbool; int ret; + cdev = pf->cdev_info; + if (!cdev) + return -ENODEV; + if (!roce_ena) { ice_unplug_aux_dev(pf); - pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_ROCEV2; + cdev->rdma_protocol &= ~IIDC_RDMA_PROTOCOL_ROCEV2; return 0; } - pf->rdma_mode |= IIDC_RDMA_PROTOCOL_ROCEV2; + cdev->rdma_protocol |= IIDC_RDMA_PROTOCOL_ROCEV2; ret = ice_plug_aux_dev(pf); if (ret) - pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_ROCEV2; + cdev->rdma_protocol &= ~IIDC_RDMA_PROTOCOL_ROCEV2; return ret; } @@ -1373,11 +1383,16 @@ ice_devlink_enable_roce_validate(struct devlink *devlink, u32 id, struct netlink_ext_ack *extack) { struct ice_pf *pf = devlink_priv(devlink); + struct iidc_rdma_core_dev_info *cdev; + + cdev = pf->cdev_info; + if (!cdev) + return -ENODEV; if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) return -EOPNOTSUPP; - if (pf->rdma_mode & IIDC_RDMA_PROTOCOL_IWARP) { + if (cdev->rdma_protocol & IIDC_RDMA_PROTOCOL_IWARP) { NL_SET_ERR_MSG_MOD(extack, "iWARP is currently enabled. This device cannot enable iWARP and RoCEv2 simultaneously"); return -EOPNOTSUPP; } @@ -1390,8 +1405,13 @@ ice_devlink_enable_iw_get(struct devlink *devlink, u32 id, struct devlink_param_gset_ctx *ctx) { struct ice_pf *pf = devlink_priv(devlink); + struct iidc_rdma_core_dev_info *cdev; - ctx->val.vbool = pf->rdma_mode & IIDC_RDMA_PROTOCOL_IWARP; + cdev = pf->cdev_info; + if (!cdev) + return -ENODEV; + + ctx->val.vbool = !!(cdev->rdma_protocol & IIDC_RDMA_PROTOCOL_IWARP); return 0; } @@ -1401,19 +1421,24 @@ static int ice_devlink_enable_iw_set(struct devlink *devlink, u32 id, struct netlink_ext_ack *extack) { struct ice_pf *pf = devlink_priv(devlink); + struct iidc_rdma_core_dev_info *cdev; bool iw_ena = ctx->val.vbool; int ret; + cdev = pf->cdev_info; + if (!cdev) + return -ENODEV; + if (!iw_ena) { ice_unplug_aux_dev(pf); - pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_IWARP; + cdev->rdma_protocol &= ~IIDC_RDMA_PROTOCOL_IWARP; return 0; } - pf->rdma_mode |= IIDC_RDMA_PROTOCOL_IWARP; + cdev->rdma_protocol |= IIDC_RDMA_PROTOCOL_IWARP; ret = ice_plug_aux_dev(pf); if (ret) - pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_IWARP; + cdev->rdma_protocol &= ~IIDC_RDMA_PROTOCOL_IWARP; return ret; } @@ -1428,7 +1453,7 @@ ice_devlink_enable_iw_validate(struct devlink *devlink, u32 id, if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) return -EOPNOTSUPP; - if (pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2) { + if (pf->cdev_info->rdma_protocol & IIDC_RDMA_PROTOCOL_ROCEV2) { NL_SET_ERR_MSG_MOD(extack, "RoCEv2 is currently enabled. 
This device cannot enable iWARP and RoCEv2 simultaneously"); return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index fd083647c14a..ddd0ad68185b 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -193,8 +193,6 @@ #define ice_pf_to_dev(pf) (&((pf)->pdev->dev)) -#define ice_pf_src_tmr_owned(pf) ((pf)->hw.func_caps.ts_func_info.src_tmr_owned) - enum ice_feature { ICE_F_DSCP, ICE_F_PHY_RCLK, @@ -401,7 +399,6 @@ struct ice_vsi { u16 req_rxq; /* User requested Rx queues */ u16 num_rx_desc; u16 num_tx_desc; - u16 qset_handle[ICE_MAX_TRAFFIC_CLASS]; struct ice_tc_cfg tc_cfg; struct bpf_prog *xdp_prog; struct ice_tx_ring **xdp_rings; /* XDP ring array */ @@ -515,6 +512,7 @@ enum ice_pf_flags { ICE_FLAG_MTU_CHANGED, ICE_FLAG_GNSS, /* GNSS successfully initialized */ ICE_FLAG_DPLL, /* SyncE/PTP dplls initialized */ + ICE_FLAG_LLDP_AQ_FLTR, ICE_PF_FLAGS_NBITS /* must be last */ }; @@ -557,7 +555,6 @@ struct ice_pf { struct devlink_port devlink_port; /* OS reserved IRQ details */ - struct msix_entry *msix_entries; struct ice_irq_tracker irq_tracker; struct ice_virt_irq_tracker virt_irq_tracker; @@ -592,7 +589,6 @@ struct ice_pf { struct gnss_serial *gnss_serial; struct gnss_device *gnss_dev; u16 num_rdma_msix; /* Total MSIX vectors for RDMA driver */ - u16 rdma_base_vector; /* spinlock to protect the AdminQ wait list */ spinlock_t aq_wait_lock; @@ -625,14 +621,12 @@ struct ice_pf { struct ice_hw_port_stats stats_prev; struct ice_hw hw; u8 stat_prev_loaded:1; /* has previous stats been loaded */ - u8 rdma_mode; u16 dcbx_cap; u32 tx_timeout_count; unsigned long tx_timeout_last_recovery; u32 tx_timeout_recovery_level; char int_name[ICE_INT_NAME_STR_LEN]; char int_name_ll_ts[ICE_INT_NAME_STR_LEN]; - struct auxiliary_device *adev; int aux_idx; u32 sw_int_count; /* count of tc_flower filters specific to channel (aka where filter @@ -664,6 +658,7 @@ struct ice_pf { struct ice_dplls dplls; struct device *hwmon_dev; struct ice_health health_reporters; + struct iidc_rdma_core_dev_info *cdev_info; u8 num_quanta_prof_used; }; @@ -1045,4 +1040,62 @@ static inline void ice_clear_rdma_cap(struct ice_pf *pf) } extern const struct xdp_metadata_ops ice_xdp_md_ops; + +/** + * ice_is_dual - Check if given config is multi-NAC + * @hw: pointer to HW structure + * + * Return: true if the device is running in multi-NAC (Network + * Acceleration Complex) configuration variant, false otherwise + * (always false for non-E825 devices). + */ +static inline bool ice_is_dual(struct ice_hw *hw) +{ + return hw->mac_type == ICE_MAC_GENERIC_3K_E825 && + (hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_DUAL_M); +} + +/** + * ice_is_primary - Check if given device belongs to the primary complex + * @hw: pointer to HW structure + * + * Check if given PF/HW is running on primary complex in multi-NAC + * configuration. + * + * Return: true if the device belongs to the primary complex, false + * otherwise (always true for non-E825 devices). + */ +static inline bool ice_is_primary(struct ice_hw *hw) +{ + return hw->mac_type != ICE_MAC_GENERIC_3K_E825 || + !ice_is_dual(hw) || + (hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M); +} + +/** + * ice_pf_src_tmr_owned - Check if a primary timer is owned by PF + * @pf: pointer to PF structure + * + * Return: true if PF owns primary timer, false otherwise.
+ */ +static inline bool ice_pf_src_tmr_owned(struct ice_pf *pf) +{ + return pf->hw.func_caps.ts_func_info.src_tmr_owned && + ice_is_primary(&pf->hw); +} + +/** + * ice_get_primary_hw - Get pointer to primary ice_hw structure + * @pf: pointer to PF structure + * + * Return: A pointer to ice_hw structure with access to timesync + * register space. + */ +static inline struct ice_hw *ice_get_primary_hw(struct ice_pf *pf) +{ + if (!pf->adapter->ctrl_pf) + return &pf->hw; + else + return &pf->adapter->ctrl_pf->hw; +} #endif /* _ICE_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 59df31c2c83f..4fedf0181c4e 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -1135,6 +1135,8 @@ int ice_init_hw(struct ice_hw *hw) } } + hw->lane_num = ice_get_phy_lane_number(hw); + return 0; err_unroll_fltr_mgmt_struct: ice_cleanup_fltr_mgmt_struct(hw); @@ -3434,7 +3436,7 @@ int ice_aq_get_fec_stats(struct ice_hw *hw, u16 pcs_quad, u16 pcs_port, msg.msg_addr_low = lower_16_bits(reg_offset); msg.msg_addr_high = receiver_id; msg.opcode = ice_sbq_msg_rd; - msg.dest_dev = rmn_0; + msg.dest_dev = ice_sbq_dev_phy_0; err = ice_sbq_rw_reg(hw, &msg, flag); if (err) @@ -4082,10 +4084,12 @@ int ice_get_phy_lane_number(struct ice_hw *hw) continue; if (hw->pf_id == lport) { + if (hw->mac_type == ICE_MAC_GENERIC_3K_E825 && + ice_is_dual(hw) && !ice_is_primary(hw)) + lane += ICE_PORTS_PER_QUAD; kfree(options); return lane; } - lport++; } @@ -6011,15 +6015,21 @@ bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw) /** * ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter * @hw: pointer to HW struct - * @vsi_num: absolute HW index for VSI + * @vsi: VSI to add the filter to * @add: boolean for if adding or removing a filter + * + * Return: 0 on success, -EOPNOTSUPP if the operation cannot be performed + * with this HW or VSI, otherwise an error corresponding to + * the AQ transaction result. 
*/ -int -ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add) +int ice_lldp_fltr_add_remove(struct ice_hw *hw, struct ice_vsi *vsi, bool add) { struct ice_aqc_lldp_filter_ctrl *cmd; struct ice_aq_desc desc; + if (vsi->type != ICE_VSI_PF || !ice_fw_supports_lldp_fltr_ctrl(hw)) + return -EOPNOTSUPP; + cmd = &desc.params.lldp_filter_ctrl; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl); @@ -6029,7 +6039,7 @@ ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add) else cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE; - cmd->vsi_num = cpu_to_le16(vsi_num); + cmd->vsi_num = cpu_to_le16(vsi->vsi_num); return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); } diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index 9b00aa0ddf10..64c530b39191 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -290,8 +290,7 @@ int ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size, struct ice_sq_cd *cd); bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw); -int -ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add); +int ice_lldp_fltr_add_remove(struct ice_hw *hw, struct ice_vsi *vsi, bool add); int ice_lldp_execute_pending_mib(struct ice_hw *hw); int ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr, diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c index 74418c445cc4..64737fc62306 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb.c @@ -1288,7 +1288,7 @@ ice_add_dscp_up_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg) tlv->ouisubtype = htonl(ouisubtype); /* bytes 0 - 63 - IPv4 DSCP2UP LUT */ - for (i = 0; i < ICE_DSCP_NUM_VAL; i++) { + for (i = 0; i < DSCP_MAX; i++) { /* IPv4 mapping */ buf[i] = dcbcfg->dscp_map[i]; /* IPv6 mapping */ diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c index a7c510832824..533eb8930aa8 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c @@ -352,8 +352,8 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked) struct ice_aqc_port_ets_elem buf = { 0 }; struct ice_dcbx_cfg *old_cfg, *curr_cfg; struct device *dev = ice_pf_to_dev(pf); + struct iidc_rdma_event *event; int ret = ICE_DCB_NO_HW_CHG; - struct iidc_event *event; struct ice_vsi *pf_vsi; curr_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg; @@ -405,7 +405,7 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked) goto free_cfg; } - set_bit(IIDC_EVENT_BEFORE_TC_CHANGE, event->type); + set_bit(IIDC_RDMA_EVENT_BEFORE_TC_CHANGE, event->type); ice_send_event_to_aux(pf, event); kfree(event); @@ -740,7 +740,9 @@ static int ice_dcb_noncontig_cfg(struct ice_pf *pf) void ice_pf_dcb_recfg(struct ice_pf *pf, bool locked) { struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg; - struct iidc_event *event; + struct iidc_rdma_priv_dev_info *privd; + struct iidc_rdma_core_dev_info *cdev; + struct iidc_rdma_event *event; u8 tc_map = 0; int v, ret; @@ -783,13 +785,17 @@ void ice_pf_dcb_recfg(struct ice_pf *pf, bool locked) if (vsi->type == ICE_VSI_PF) ice_dcbnl_set_all(vsi); } - if (!locked) { + + cdev = pf->cdev_info; + if (cdev && !locked) { + privd = cdev->iidc_priv; + ice_setup_dcb_qos_info(pf, &privd->qos_info); /* Notify the AUX drivers 
that TC change is finished */ event = kzalloc(sizeof(*event), GFP_KERNEL); if (!event) return; - set_bit(IIDC_EVENT_AFTER_TC_CHANGE, event->type); + set_bit(IIDC_RDMA_EVENT_AFTER_TC_CHANGE, event->type); ice_send_event_to_aux(pf, event); kfree(event); } @@ -846,7 +852,7 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked) goto dcb_init_err; } - ice_cfg_sw_lldp(pf_vsi, false, true); + ice_cfg_sw_rx_lldp(pf, true); pf->dcbx_cap = ice_dcb_get_mode(port_info, true); return 0; @@ -945,6 +951,37 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring, } /** + * ice_setup_dcb_qos_info - Setup DCB QoS information + * @pf: ptr to ice_pf + * @qos_info: QoS param instance + */ +void ice_setup_dcb_qos_info(struct ice_pf *pf, struct iidc_rdma_qos_params *qos_info) +{ + struct ice_dcbx_cfg *dcbx_cfg; + unsigned int i; + u32 up2tc; + + if (!pf || !qos_info) + return; + + dcbx_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg; + up2tc = rd32(&pf->hw, PRTDCB_TUP2TC); + + qos_info->num_tc = ice_dcb_get_num_tc(dcbx_cfg); + + for (i = 0; i < IIDC_MAX_USER_PRIORITY; i++) + qos_info->up2tc[i] = (up2tc >> (i * 3)) & 0x7; + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + qos_info->tc_info[i].rel_bw = dcbx_cfg->etscfg.tcbwtable[i]; + + qos_info->pfc_mode = dcbx_cfg->pfc_mode; + if (qos_info->pfc_mode == IIDC_DSCP_PFC_MODE) + for (i = 0; i < DSCP_MAX; i++) + qos_info->dscp_map[i] = dcbx_cfg->dscp_map[i]; +} + +/** * ice_dcb_is_mib_change_pending - Check if MIB change is pending * @state: MIB change state */ diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h index 800879a88c5e..da9ba814b4e8 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h @@ -31,6 +31,9 @@ void ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first); void +ice_setup_dcb_qos_info(struct ice_pf *pf, + struct iidc_rdma_qos_params *qos_info); +void ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, struct ice_rq_event_info *event); /** @@ -134,5 +137,11 @@ static inline void ice_update_dcb_stats(struct ice_pf *pf) { } static inline void ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, struct ice_rq_event_info *event) { } static inline void ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, u8 dcb_tc) { } +static inline void +ice_setup_dcb_qos_info(struct ice_pf *pf, struct iidc_rdma_qos_params *qos_info) +{ + qos_info->num_tc = 1; + qos_info->tc_info[0].rel_bw = 100; +} #endif /* CONFIG_DCB */ #endif /* _ICE_DCB_LIB_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c index 6d50b90a7359..a10c1c8d8697 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c @@ -754,7 +754,7 @@ static int ice_dcbnl_setapp(struct net_device *netdev, struct dcb_app *app) if (!ice_is_feature_supported(pf, ICE_F_DSCP)) return -EOPNOTSUPP; - if (app->protocol >= ICE_DSCP_NUM_VAL) { + if (app->protocol >= DSCP_MAX) { netdev_err(netdev, "DSCP value 0x%04X out of range\n", app->protocol); return -EINVAL; @@ -931,7 +931,7 @@ static int ice_dcbnl_delapp(struct net_device *netdev, struct dcb_app *app) /* if the last DSCP mapping just got deleted, need to switch * to L2 VLAN QoS mode */ - if (bitmap_empty(new_cfg->dscp_mapped, ICE_DSCP_NUM_VAL) && + if (bitmap_empty(new_cfg->dscp_mapped, DSCP_MAX) && new_cfg->pfc_mode == ICE_QOS_MODE_DSCP) { ret = ice_aq_set_pfc_mode(&pf->hw, ICE_AQC_PFC_VLAN_BASED_PFC, diff --git 
a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c index ed21d7f55ac1..6aae03771746 100644 --- a/drivers/net/ethernet/intel/ice/ice_eswitch.c +++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c @@ -29,6 +29,7 @@ static int ice_eswitch_setup_env(struct ice_pf *pf) return -ENODEV; ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx); + ice_vsi_cfg_sw_lldp(uplink_vsi, true, false); netif_addr_lock_bh(netdev); __dev_uc_unsync(netdev, NULL); @@ -245,6 +246,10 @@ ice_eswitch_set_target_vsi(struct sk_buff *skb, u64 cd_cmd, dst_vsi; if (!dst) { + struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb); + + if (unlikely(eth->h_proto == htons(ETH_P_LLDP))) + return; cd_cmd = ICE_TX_CTX_DESC_SWTCH_UPLINK << ICE_TXD_CTX_QW1_CMD_S; off->cd_qw1 |= (cd_cmd | ICE_TX_DESC_DTYPE_CTX); } else { @@ -278,6 +283,7 @@ static void ice_eswitch_release_env(struct ice_pf *pf) ice_fltr_add_mac_and_broadcast(uplink_vsi, uplink_vsi->port_info->mac.perm_addr, ICE_FWD_TO_VSI); + ice_vsi_cfg_sw_lldp(uplink_vsi, true, true); } /** diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 7c2dc347e4e5..bbf9e6fd315b 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -1818,7 +1818,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags) /* Remove rule to direct LLDP packets to default VSI. * The FW LLDP engine will now be consuming them. */ - ice_cfg_sw_lldp(vsi, false, false); + ice_cfg_sw_rx_lldp(vsi->back, false); /* AQ command to start FW LLDP agent will return an * error if the agent is already started @@ -3964,11 +3964,11 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch) return -EINVAL; } - if (pf->adev) { + if (pf->cdev_info && pf->cdev_info->adev) { mutex_lock(&pf->adev_mutex); - device_lock(&pf->adev->dev); + device_lock(&pf->cdev_info->adev->dev); locked = true; - if (pf->adev->dev.driver) { + if (pf->cdev_info->adev->dev.driver) { netdev_err(dev, "Cannot change channels when RDMA is active\n"); ret = -EBUSY; goto adev_unlock; @@ -3987,7 +3987,7 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch) adev_unlock: if (locked) { - device_unlock(&pf->adev->dev); + device_unlock(&pf->cdev_info->adev->dev); mutex_unlock(&pf->adev_mutex); } return ret; diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c index 1d118171de37..aceec184e89b 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c @@ -1605,7 +1605,7 @@ void ice_fdir_replay_fltrs(struct ice_pf *pf) */ int ice_fdir_create_dflt_rules(struct ice_pf *pf) { - const enum ice_fltr_ptype dflt_rules[] = { + static const enum ice_fltr_ptype dflt_rules[] = { ICE_FLTR_PTYPE_NONF_IPV4_TCP, ICE_FLTR_PTYPE_NONF_IPV4_UDP, ICE_FLTR_PTYPE_NONF_IPV6_TCP, ICE_FLTR_PTYPE_NONF_IPV6_UDP, }; diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c index bab3e81cad5d..6ab53e430f91 100644 --- a/drivers/net/ethernet/intel/ice/ice_idc.c +++ b/drivers/net/ethernet/intel/ice/ice_idc.c @@ -9,22 +9,25 @@ static DEFINE_XARRAY_ALLOC1(ice_aux_id); /** - * ice_get_auxiliary_drv - retrieve iidc_auxiliary_drv struct - * @pf: pointer to PF struct + * ice_get_auxiliary_drv - retrieve iidc_rdma_core_auxiliary_drv struct + * @cdev: pointer to iidc_rdma_core_dev_info struct * * This function has to be called with a 
device_lock on the - * pf->adev.dev to avoid race conditions. + * cdev->adev.dev to avoid race conditions. + * + * Return: pointer to the matched auxiliary driver struct */ -static struct iidc_auxiliary_drv *ice_get_auxiliary_drv(struct ice_pf *pf) +static struct iidc_rdma_core_auxiliary_drv * +ice_get_auxiliary_drv(struct iidc_rdma_core_dev_info *cdev) { struct auxiliary_device *adev; - adev = pf->adev; + adev = cdev->adev; if (!adev || !adev->dev.driver) return NULL; - return container_of(adev->dev.driver, struct iidc_auxiliary_drv, - adrv.driver); + return container_of(adev->dev.driver, + struct iidc_rdma_core_auxiliary_drv, adrv.driver); } /** @@ -32,44 +35,54 @@ static struct iidc_auxiliary_drv *ice_get_auxiliary_drv(struct ice_pf *pf) * @pf: pointer to PF struct * @event: event struct */ -void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_event *event) +void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_rdma_event *event) { - struct iidc_auxiliary_drv *iadrv; + struct iidc_rdma_core_auxiliary_drv *iadrv; + struct iidc_rdma_core_dev_info *cdev; if (WARN_ON_ONCE(!in_task())) return; + cdev = pf->cdev_info; + if (!cdev) + return; + mutex_lock(&pf->adev_mutex); - if (!pf->adev) + if (!cdev->adev) goto finish; - device_lock(&pf->adev->dev); - iadrv = ice_get_auxiliary_drv(pf); + device_lock(&cdev->adev->dev); + iadrv = ice_get_auxiliary_drv(cdev); if (iadrv && iadrv->event_handler) - iadrv->event_handler(pf, event); - device_unlock(&pf->adev->dev); + iadrv->event_handler(cdev, event); + device_unlock(&cdev->adev->dev); finish: mutex_unlock(&pf->adev_mutex); } /** * ice_add_rdma_qset - Add Leaf Node for RDMA Qset - * @pf: PF struct + * @cdev: pointer to iidc_rdma_core_dev_info struct * @qset: Resource to be allocated + * + * Return: Zero on success or error code encountered */ -int ice_add_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset) +int ice_add_rdma_qset(struct iidc_rdma_core_dev_info *cdev, + struct iidc_rdma_qset_params *qset) { u16 max_rdmaqs[ICE_MAX_TRAFFIC_CLASS]; struct ice_vsi *vsi; struct device *dev; + struct ice_pf *pf; u32 qset_teid; u16 qs_handle; int status; int i; - if (WARN_ON(!pf || !qset)) + if (WARN_ON(!cdev || !qset)) return -EINVAL; + pf = pci_get_drvdata(cdev->pdev); dev = ice_pf_to_dev(pf); if (!ice_is_rdma_ena(pf)) @@ -100,7 +113,6 @@ int ice_add_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset) dev_err(dev, "Failed VSI RDMA Qset enable\n"); return status; } - vsi->qset_handle[qset->tc] = qset->qs_handle; qset->teid = qset_teid; return 0; @@ -109,18 +121,23 @@ EXPORT_SYMBOL_GPL(ice_add_rdma_qset); /** * ice_del_rdma_qset - Delete leaf node for RDMA Qset - * @pf: PF struct + * @cdev: pointer to iidc_rdma_core_dev_info struct * @qset: Resource to be freed + * + * Return: Zero on success, error code on failure */ -int ice_del_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset) +int ice_del_rdma_qset(struct iidc_rdma_core_dev_info *cdev, + struct iidc_rdma_qset_params *qset) { struct ice_vsi *vsi; + struct ice_pf *pf; u32 teid; u16 q_id; - if (WARN_ON(!pf || !qset)) + if (WARN_ON(!cdev || !qset)) return -EINVAL; + pf = pci_get_drvdata(cdev->pdev); vsi = ice_find_vsi(pf, qset->vport_id); if (!vsi) { dev_err(ice_pf_to_dev(pf), "RDMA Invalid VSI\n"); @@ -130,36 +147,36 @@ int ice_del_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset) q_id = qset->qs_handle; teid = qset->teid; - vsi->qset_handle[qset->tc] = 0; - return ice_dis_vsi_rdma_qset(vsi->port_info, 1, &teid, &q_id); } 
EXPORT_SYMBOL_GPL(ice_del_rdma_qset); /** * ice_rdma_request_reset - accept request from RDMA to perform a reset - * @pf: struct for PF + * @cdev: pointer to iidc_rdma_core_dev_info struct * @reset_type: type of reset + * + * Return: Zero on success, error code on failure */ -int ice_rdma_request_reset(struct ice_pf *pf, enum iidc_reset_type reset_type) +int ice_rdma_request_reset(struct iidc_rdma_core_dev_info *cdev, + enum iidc_rdma_reset_type reset_type) { enum ice_reset_req reset; + struct ice_pf *pf; - if (WARN_ON(!pf)) + if (WARN_ON(!cdev)) return -EINVAL; + pf = pci_get_drvdata(cdev->pdev); + switch (reset_type) { - case IIDC_PFR: + case IIDC_FUNC_RESET: reset = ICE_RESET_PFR; break; - case IIDC_CORER: + case IIDC_DEV_RESET: reset = ICE_RESET_CORER; break; - case IIDC_GLOBR: - reset = ICE_RESET_GLOBR; - break; default: - dev_err(ice_pf_to_dev(pf), "incorrect reset request\n"); return -EINVAL; } @@ -169,18 +186,23 @@ EXPORT_SYMBOL_GPL(ice_rdma_request_reset); /** * ice_rdma_update_vsi_filter - update main VSI filters for RDMA - * @pf: pointer to struct for PF + * @cdev: pointer to iidc_rdma_core_dev_info struct * @vsi_id: VSI HW idx to update filter on * @enable: bool whether to enable or disable filters + * + * Return: Zero on success, error code on failure */ -int ice_rdma_update_vsi_filter(struct ice_pf *pf, u16 vsi_id, bool enable) +int ice_rdma_update_vsi_filter(struct iidc_rdma_core_dev_info *cdev, + u16 vsi_id, bool enable) { struct ice_vsi *vsi; + struct ice_pf *pf; int status; - if (WARN_ON(!pf)) + if (WARN_ON(!cdev)) return -EINVAL; + pf = pci_get_drvdata(cdev->pdev); vsi = ice_find_vsi(pf, vsi_id); if (!vsi) return -EINVAL; @@ -201,37 +223,23 @@ int ice_rdma_update_vsi_filter(struct ice_pf *pf, u16 vsi_id, bool enable) EXPORT_SYMBOL_GPL(ice_rdma_update_vsi_filter); /** - * ice_get_qos_params - parse QoS params for RDMA consumption - * @pf: pointer to PF struct - * @qos: set of QoS values + * ice_alloc_rdma_qvector - alloc vector resources reserved for RDMA driver + * @cdev: pointer to iidc_rdma_core_dev_info struct + * @entry: MSI-X entry to be filled in + * + * Return: Zero on success, error code on failure */ -void ice_get_qos_params(struct ice_pf *pf, struct iidc_qos_params *qos) +int ice_alloc_rdma_qvector(struct iidc_rdma_core_dev_info *cdev, + struct msix_entry *entry) { - struct ice_dcbx_cfg *dcbx_cfg; - unsigned int i; - u32 up2tc; - - dcbx_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg; - up2tc = rd32(&pf->hw, PRTDCB_TUP2TC); - - qos->num_tc = ice_dcb_get_num_tc(dcbx_cfg); - for (i = 0; i < IIDC_MAX_USER_PRIORITY; i++) - qos->up2tc[i] = (up2tc >> (i * 3)) & 0x7; - - for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) - qos->tc_info[i].rel_bw = dcbx_cfg->etscfg.tcbwtable[i]; - - qos->pfc_mode = dcbx_cfg->pfc_mode; - if (qos->pfc_mode == IIDC_DSCP_PFC_MODE) - for (i = 0; i < IIDC_MAX_DSCP_MAPPING; i++) - qos->dscp_map[i] = dcbx_cfg->dscp_map[i]; -} -EXPORT_SYMBOL_GPL(ice_get_qos_params); + struct msi_map map; + struct ice_pf *pf; -int ice_alloc_rdma_qvector(struct ice_pf *pf, struct msix_entry *entry) -{ - struct msi_map map = ice_alloc_irq(pf, true); + if (WARN_ON(!cdev)) + return -EINVAL; + pf = pci_get_drvdata(cdev->pdev); + map = ice_alloc_irq(pf, true); if (map.index < 0) return -ENOMEM; @@ -244,12 +252,19 @@ EXPORT_SYMBOL_GPL(ice_alloc_rdma_qvector); /** * ice_free_rdma_qvector - free vector resources reserved for RDMA driver - * @pf: board private structure to initialize + * @cdev: pointer to iidc_rdma_core_dev_info struct * @entry: MSI-X entry to be removed */ -void
ice_free_rdma_qvector(struct ice_pf *pf, struct msix_entry *entry) +void ice_free_rdma_qvector(struct iidc_rdma_core_dev_info *cdev, + struct msix_entry *entry) { struct msi_map map; + struct ice_pf *pf; + + if (WARN_ON(!cdev || !entry)) + return; + + pf = pci_get_drvdata(cdev->pdev); map.index = entry->entry; map.virq = entry->vector; @@ -263,19 +278,23 @@ EXPORT_SYMBOL_GPL(ice_free_rdma_qvector); */ static void ice_adev_release(struct device *dev) { - struct iidc_auxiliary_dev *iadev; + struct iidc_rdma_core_auxiliary_dev *iadev; - iadev = container_of(dev, struct iidc_auxiliary_dev, adev.dev); + iadev = container_of(dev, struct iidc_rdma_core_auxiliary_dev, + adev.dev); kfree(iadev); } /** * ice_plug_aux_dev - allocate and register AUX device * @pf: pointer to pf struct + * + * Return: Zero on success, error code on failure */ int ice_plug_aux_dev(struct ice_pf *pf) { - struct iidc_auxiliary_dev *iadev; + struct iidc_rdma_core_auxiliary_dev *iadev; + struct iidc_rdma_core_dev_info *cdev; struct auxiliary_device *adev; int ret; @@ -285,17 +304,22 @@ int ice_plug_aux_dev(struct ice_pf *pf) if (!ice_is_rdma_ena(pf)) return 0; + cdev = pf->cdev_info; + if (!cdev) + return -ENODEV; + iadev = kzalloc(sizeof(*iadev), GFP_KERNEL); if (!iadev) return -ENOMEM; adev = &iadev->adev; - iadev->pf = pf; + iadev->cdev_info = cdev; adev->id = pf->aux_idx; adev->dev.release = ice_adev_release; adev->dev.parent = &pf->pdev->dev; - adev->name = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ? "roce" : "iwarp"; + adev->name = cdev->rdma_protocol & IIDC_RDMA_PROTOCOL_ROCEV2 ? + "roce" : "iwarp"; ret = auxiliary_device_init(adev); if (ret) { @@ -310,7 +334,7 @@ int ice_plug_aux_dev(struct ice_pf *pf) } mutex_lock(&pf->adev_mutex); - pf->adev = adev; + cdev->adev = adev; mutex_unlock(&pf->adev_mutex); return 0; @@ -324,8 +348,8 @@ void ice_unplug_aux_dev(struct ice_pf *pf) struct auxiliary_device *adev; mutex_lock(&pf->adev_mutex); - adev = pf->adev; - pf->adev = NULL; + adev = pf->cdev_info->adev; + pf->cdev_info->adev = NULL; mutex_unlock(&pf->adev_mutex); if (adev) { @@ -340,7 +364,9 @@ void ice_unplug_aux_dev(struct ice_pf *pf) */ int ice_init_rdma(struct ice_pf *pf) { + struct iidc_rdma_priv_dev_info *privd; struct device *dev = &pf->pdev->dev; + struct iidc_rdma_core_dev_info *cdev; int ret; if (!ice_is_rdma_ena(pf)) { @@ -348,22 +374,50 @@ int ice_init_rdma(struct ice_pf *pf) return 0; } + cdev = kzalloc(sizeof(*cdev), GFP_KERNEL); + if (!cdev) + return -ENOMEM; + + pf->cdev_info = cdev; + + privd = kzalloc(sizeof(*privd), GFP_KERNEL); + if (!privd) { + ret = -ENOMEM; + goto err_privd_alloc; + } + + privd->pf_id = pf->hw.pf_id; ret = xa_alloc(&ice_aux_id, &pf->aux_idx, NULL, XA_LIMIT(1, INT_MAX), GFP_KERNEL); if (ret) { dev_err(dev, "Failed to allocate device ID for AUX driver\n"); - return -ENOMEM; + ret = -ENOMEM; + goto err_alloc_xa; } - pf->rdma_mode |= IIDC_RDMA_PROTOCOL_ROCEV2; + cdev->iidc_priv = privd; + privd->netdev = pf->vsi[0]->netdev; + + privd->hw_addr = (u8 __iomem *)pf->hw.hw_addr; + cdev->pdev = pf->pdev; + privd->vport_id = pf->vsi[0]->vsi_num; + + pf->cdev_info->rdma_protocol |= IIDC_RDMA_PROTOCOL_ROCEV2; + ice_setup_dcb_qos_info(pf, &privd->qos_info); ret = ice_plug_aux_dev(pf); if (ret) goto err_plug_aux_dev; return 0; err_plug_aux_dev: - pf->adev = NULL; + pf->cdev_info->adev = NULL; xa_erase(&ice_aux_id, pf->aux_idx); +err_alloc_xa: + kfree(privd); +err_privd_alloc: + kfree(cdev); + pf->cdev_info = NULL; + return ret; } @@ -378,4 +432,7 @@ void ice_deinit_rdma(struct ice_pf *pf) 
ice_unplug_aux_dev(pf); xa_erase(&ice_aux_id, pf->aux_idx); + kfree(pf->cdev_info->iidc_priv); + kfree(pf->cdev_info); + pf->cdev_info = NULL; } diff --git a/drivers/net/ethernet/intel/ice/ice_idc_int.h b/drivers/net/ethernet/intel/ice/ice_idc_int.h index 4b0c86757df9..17dbfcfb6a2a 100644 --- a/drivers/net/ethernet/intel/ice/ice_idc_int.h +++ b/drivers/net/ethernet/intel/ice/ice_idc_int.h @@ -4,10 +4,11 @@ #ifndef _ICE_IDC_INT_H_ #define _ICE_IDC_INT_H_ -#include <linux/net/intel/iidc.h> +#include <linux/net/intel/iidc_rdma.h> +#include <linux/net/intel/iidc_rdma_ice.h> struct ice_pf; -void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_event *event); +void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_rdma_event *event); #endif /* !_ICE_IDC_INT_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 0bcf9d127ac9..03bb16191237 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -2065,12 +2065,15 @@ static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi) } /** - * ice_cfg_sw_lldp - Config switch rules for LLDP packet handling + * ice_vsi_cfg_sw_lldp - Config switch rules for LLDP packet handling * @vsi: the VSI being configured * @tx: bool to determine Tx or Rx rule * @create: bool to determine create or remove Rule + * + * Adding an ethtype Tx rule to the uplink VSI results in it being applied + * to the whole port, so LLDP transmission for VFs will be blocked too. */ -void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create) +void ice_vsi_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create) { int (*eth_fltr)(struct ice_vsi *v, u16 type, u16 flag, enum ice_sw_fwd_act_type act); @@ -2085,19 +2088,59 @@ void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create) status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_TX, ICE_DROP_PACKET); } else { - if (ice_fw_supports_lldp_fltr_ctrl(&pf->hw)) { - status = ice_lldp_fltr_add_remove(&pf->hw, vsi->vsi_num, - create); - } else { + if (!test_bit(ICE_FLAG_LLDP_AQ_FLTR, pf->flags)) { status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_RX, ICE_FWD_TO_VSI); + if (!status || !create) + goto report; + + dev_info(dev, + "Failed to add generic LLDP Rx filter on VSI %i error: %d, falling back to specialized AQ control\n", + vsi->vsi_num, status); } + + status = ice_lldp_fltr_add_remove(&pf->hw, vsi, create); + if (!status) + set_bit(ICE_FLAG_LLDP_AQ_FLTR, pf->flags); + } +report: if (status) - dev_dbg(dev, "Fail %s %s LLDP rule on VSI %i error: %d\n", - create ? "adding" : "removing", tx ? "TX" : "RX", - vsi->vsi_num, status); + dev_warn(dev, "Failed to %s %s LLDP rule on VSI %i error: %d\n", + create ? "add" : "remove", tx ? "Tx" : "Rx", + vsi->vsi_num, status); +} + +/** + * ice_cfg_sw_rx_lldp - Enable/disable software handling of LLDP + * @pf: the PF being configured + * @enable: enable or disable + * + * Configure switch rules to enable/disable LLDP handling by software + * across PF. 
+ */ +void ice_cfg_sw_rx_lldp(struct ice_pf *pf, bool enable) +{ + struct ice_vsi *vsi; + struct ice_vf *vf; + unsigned int bkt; + + vsi = ice_get_main_vsi(pf); + ice_vsi_cfg_sw_lldp(vsi, false, enable); + + if (!test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) + return; + + ice_for_each_vf(pf, bkt, vf) { + vsi = ice_get_vf_vsi(vf); + + if (WARN_ON(!vsi)) + continue; + + if (ice_vf_is_lldp_ena(vf)) + ice_vsi_cfg_sw_lldp(vsi, false, enable); + } } /** @@ -2528,7 +2571,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params) if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF) { ice_fltr_add_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX, ICE_DROP_PACKET); - ice_cfg_sw_lldp(vsi, true, true); + ice_vsi_cfg_sw_lldp(vsi, true, true); } if (!vsi->agg_node) @@ -2825,9 +2868,11 @@ int ice_vsi_release(struct ice_vsi *vsi) /* The Rx rule will only exist to remove if the LLDP FW * engine is currently stopped */ - if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF && - !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) - ice_cfg_sw_lldp(vsi, false, false); + if (!ice_is_safe_mode(pf) && + !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags) && + (vsi->type == ICE_VSI_PF || (vsi->type == ICE_VSI_VF && + ice_vf_is_lldp_ena(vsi->vf)))) + ice_vsi_cfg_sw_lldp(vsi, false, false); ice_vsi_decfg(vsi); diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h index b4c9cb28a016..654516c5fc3e 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_lib.h @@ -29,7 +29,8 @@ ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi); -void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create); +void ice_vsi_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create); +void ice_cfg_sw_rx_lldp(struct ice_pf *pf, bool enable); int ice_set_link(struct ice_vsi *vsi, bool ena); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index d390157b59fe..20d3baf955e3 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -2401,11 +2401,11 @@ static void ice_service_task(struct work_struct *work) } if (test_and_clear_bit(ICE_AUX_ERR_PENDING, pf->state)) { - struct iidc_event *event; + struct iidc_rdma_event *event; event = kzalloc(sizeof(*event), GFP_KERNEL); if (event) { - set_bit(IIDC_EVENT_CRIT_ERR, event->type); + set_bit(IIDC_RDMA_EVENT_CRIT_ERR, event->type); /* report the entire OICR value to AUX driver */ swap(event->reg, pf->oicr_err_reg); ice_send_event_to_aux(pf, event); @@ -2424,11 +2424,11 @@ static void ice_service_task(struct work_struct *work) ice_plug_aux_dev(pf); if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) { - struct iidc_event *event; + struct iidc_rdma_event *event; event = kzalloc(sizeof(*event), GFP_KERNEL); if (event) { - set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type); + set_bit(IIDC_RDMA_EVENT_AFTER_MTU_CHANGE, event->type); ice_send_event_to_aux(pf, event); kfree(event); } @@ -8330,11 +8330,16 @@ void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue) * @np: net device to configure * @filter_dev: device on which filter is added * @cls_flower: offload data + * @ingress: if the rule is added to an ingress block + * + * Return: 0 if the flower was successfully added or deleted, + * negative error code otherwise. 
*/ static int ice_setup_tc_cls_flower(struct ice_netdev_priv *np, struct net_device *filter_dev, - struct flow_cls_offload *cls_flower) + struct flow_cls_offload *cls_flower, + bool ingress) { struct ice_vsi *vsi = np->vsi; @@ -8343,7 +8348,7 @@ ice_setup_tc_cls_flower(struct ice_netdev_priv *np, switch (cls_flower->command) { case FLOW_CLS_REPLACE: - return ice_add_cls_flower(filter_dev, vsi, cls_flower); + return ice_add_cls_flower(filter_dev, vsi, cls_flower, ingress); case FLOW_CLS_DESTROY: return ice_del_cls_flower(vsi, cls_flower); default: @@ -8352,20 +8357,46 @@ ice_setup_tc_cls_flower(struct ice_netdev_priv *np, } /** - * ice_setup_tc_block_cb - callback handler registered for TC block + * ice_setup_tc_block_cb_ingress - callback handler for ingress TC block * @type: TC SETUP type * @type_data: TC flower offload data that contains user input * @cb_priv: netdev private data + * + * Return: 0 if the setup was successful, negative error code otherwise. */ static int -ice_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv) +ice_setup_tc_block_cb_ingress(enum tc_setup_type type, void *type_data, + void *cb_priv) { struct ice_netdev_priv *np = cb_priv; switch (type) { case TC_SETUP_CLSFLOWER: return ice_setup_tc_cls_flower(np, np->vsi->netdev, - type_data); + type_data, true); + default: + return -EOPNOTSUPP; + } +} + +/** + * ice_setup_tc_block_cb_egress - callback handler for egress TC block + * @type: TC SETUP type + * @type_data: TC flower offload data that contains user input + * @cb_priv: netdev private data + * + * Return: 0 if the setup was successful, negative error code otherwise. + */ +static int +ice_setup_tc_block_cb_egress(enum tc_setup_type type, void *type_data, + void *cb_priv) +{ + struct ice_netdev_priv *np = cb_priv; + + switch (type) { + case TC_SETUP_CLSFLOWER: + return ice_setup_tc_cls_flower(np, np->vsi->netdev, + type_data, false); default: return -EOPNOTSUPP; } @@ -9310,27 +9341,45 @@ ice_setup_tc(struct net_device *netdev, enum tc_setup_type type, void *type_data) { struct ice_netdev_priv *np = netdev_priv(netdev); + enum flow_block_binder_type binder_type; + struct iidc_rdma_core_dev_info *cdev; struct ice_pf *pf = np->vsi->back; + flow_setup_cb_t *flower_handler; bool locked = false; int err; switch (type) { case TC_SETUP_BLOCK: + binder_type = + ((struct flow_block_offload *)type_data)->binder_type; + + switch (binder_type) { + case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS: + flower_handler = ice_setup_tc_block_cb_ingress; + break; + case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS: + flower_handler = ice_setup_tc_block_cb_egress; + break; + default: + return -EOPNOTSUPP; + } + return flow_block_cb_setup_simple(type_data, &ice_block_cb_list, - ice_setup_tc_block_cb, - np, np, true); + flower_handler, + np, np, false); case TC_SETUP_QDISC_MQPRIO: if (ice_is_eswitch_mode_switchdev(pf)) { netdev_err(netdev, "TC MQPRIO offload not supported, switchdev is enabled\n"); return -EOPNOTSUPP; } - if (pf->adev) { + cdev = pf->cdev_info; + if (cdev && cdev->adev) { mutex_lock(&pf->adev_mutex); - device_lock(&pf->adev->dev); + device_lock(&cdev->adev->dev); locked = true; - if (pf->adev->dev.driver) { + if (cdev->adev->dev.driver) { netdev_err(netdev, "Cannot change qdisc when RDMA is active\n"); err = -EBUSY; goto adev_unlock; @@ -9344,7 +9393,7 @@ ice_setup_tc(struct net_device *netdev, enum tc_setup_type type, adev_unlock: if (locked) { - device_unlock(&pf->adev->dev); + device_unlock(&cdev->adev->dev); mutex_unlock(&pf->adev_mutex); } return err; @@ 
-9380,7 +9429,7 @@ ice_indr_setup_block_cb(enum tc_setup_type type, void *type_data, case TC_SETUP_CLSFLOWER: return ice_setup_tc_cls_flower(np, priv->netdev, (struct flow_cls_offload *) - type_data); + type_data, false); default: return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c index 1fd1ae03eb90..b79a148ed0f2 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp.c @@ -305,6 +305,9 @@ u64 ice_ptp_read_src_clk_reg(struct ice_pf *pf, u32 hi, lo, lo2; u8 tmr_idx; + if (!ice_is_primary(hw)) + hw = ice_get_primary_hw(pf); + tmr_idx = ice_get_ptp_src_clock_index(hw); guard(spinlock)(&pf->adapter->ptp_gltsyn_time_lock); /* Read the system timestamp pre PHC read */ @@ -1624,14 +1627,6 @@ static int ice_ptp_cfg_extts(struct ice_pf *pf, struct ptp_extts_request *rq, int pin_desc_idx; u8 tmr_idx; - /* Reject requests with unsupported flags */ - - if (rq->flags & ~(PTP_ENABLE_FEATURE | - PTP_RISING_EDGE | - PTP_FALLING_EDGE | - PTP_STRICT_FLAGS)) - return -EOPNOTSUPP; - tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; chan = rq->index; @@ -1802,9 +1797,6 @@ static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq, struct ice_hw *hw = &pf->hw; int pin_desc_idx; - if (rq->flags & ~PTP_PEROUT_PHASE) - return -EOPNOTSUPP; - pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_PEROUT, rq->index); if (pin_desc_idx < 0) return -EIO; @@ -2737,6 +2729,11 @@ static void ice_ptp_set_caps(struct ice_pf *pf) info->enable = ice_ptp_gpio_enable; info->verify = ice_verify_pin; + info->supported_extts_flags = PTP_RISING_EDGE | + PTP_FALLING_EDGE | + PTP_STRICT_FLAGS; + info->supported_perout_flags = PTP_PEROUT_PHASE; + switch (pf->hw.mac_type) { case ICE_MAC_E810: ice_ptp_set_funcs_e810(pf); @@ -2986,6 +2983,32 @@ static void ice_ptp_periodic_work(struct kthread_work *work) } /** + * ice_ptp_prepare_rebuild_sec - Prepare second NAC for PTP reset or rebuild + * @pf: Board private structure + * @rebuild: rebuild if true, prepare if false + * @reset_type: the reset type being performed + */ +static void ice_ptp_prepare_rebuild_sec(struct ice_pf *pf, bool rebuild, + enum ice_reset_req reset_type) +{ + struct list_head *entry; + + list_for_each(entry, &pf->adapter->ports.ports) { + struct ice_ptp_port *port = list_entry(entry, + struct ice_ptp_port, + list_node); + struct ice_pf *peer_pf = ptp_port_to_pf(port); + + if (!ice_is_primary(&peer_pf->hw)) { + if (rebuild) + ice_ptp_rebuild(peer_pf, reset_type); + else + ice_ptp_prepare_for_reset(peer_pf, reset_type); + } + } +} + +/** * ice_ptp_prepare_for_reset - Prepare PTP for reset * @pf: Board private structure * @reset_type: the reset type being performed @@ -2993,6 +3016,7 @@ static void ice_ptp_periodic_work(struct kthread_work *work) void ice_ptp_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type) { struct ice_ptp *ptp = &pf->ptp; + struct ice_hw *hw = &pf->hw; u8 src_tmr; if (ptp->state != ICE_PTP_READY) @@ -3008,6 +3032,9 @@ void ice_ptp_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type) if (reset_type == ICE_RESET_PFR) return; + if (ice_pf_src_tmr_owned(pf) && hw->mac_type == ICE_MAC_GENERIC_3K_E825) + ice_ptp_prepare_rebuild_sec(pf, false, reset_type); + ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx); /* Disable periodic outputs */ @@ -3129,13 +3156,6 @@ err: dev_err(ice_pf_to_dev(pf), "PTP reset failed %d\n", err); } -static bool ice_is_primary(struct ice_hw *hw) -{ - return hw->mac_type == 
ICE_MAC_GENERIC_3K_E825 && ice_is_dual(hw) ? - !!(hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M) : - true; -} - static int ice_ptp_setup_adapter(struct ice_pf *pf) { if (!ice_pf_src_tmr_owned(pf) || !ice_is_primary(&pf->hw)) @@ -3355,17 +3375,16 @@ void ice_ptp_init(struct ice_pf *pf) { struct ice_ptp *ptp = &pf->ptp; struct ice_hw *hw = &pf->hw; - int lane_num, err; + int err; ptp->state = ICE_PTP_INITIALIZING; - lane_num = ice_get_phy_lane_number(hw); - if (lane_num < 0) { - err = lane_num; + if (hw->lane_num < 0) { + err = hw->lane_num; goto err_exit; } + ptp->port.port_num = hw->lane_num; - ptp->port.port_num = (u8)lane_num; ice_ptp_init_hw(hw); ice_ptp_init_tx_interrupt_mode(pf); diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c index 89bb8461284a..ccac84eb34c9 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c @@ -240,7 +240,7 @@ static int ice_read_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 *val) { struct ice_sbq_msg_input cgu_msg = { .opcode = ice_sbq_msg_rd, - .dest_dev = cgu, + .dest_dev = ice_sbq_dev_cgu, .msg_addr_low = addr }; int err; @@ -272,7 +272,7 @@ static int ice_write_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 val) { struct ice_sbq_msg_input cgu_msg = { .opcode = ice_sbq_msg_wr, - .dest_dev = cgu, + .dest_dev = ice_sbq_dev_cgu, .msg_addr_low = addr, .data = val }; @@ -874,8 +874,12 @@ static u32 ice_ptp_tmr_cmd_to_port_reg(struct ice_hw *hw, */ void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) { + struct ice_pf *pf = container_of(hw, struct ice_pf, hw); u32 cmd_val = ice_ptp_tmr_cmd_to_src_reg(hw, cmd); + if (!ice_is_primary(hw)) + hw = ice_get_primary_hw(pf); + wr32(hw, GLTSYN_CMD, cmd_val); } @@ -891,6 +895,9 @@ static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw) { struct ice_pf *pf = container_of(hw, struct ice_pf, hw); + if (!ice_is_primary(hw)) + hw = ice_get_primary_hw(pf); + guard(spinlock)(&pf->adapter->ptp_gltsyn_time_lock); wr32(hw, GLTSYN_CMD_SYNC, SYNC_EXEC_CMD); ice_flush(hw); @@ -919,16 +926,24 @@ static void ice_ptp_cfg_sync_delay(const struct ice_hw *hw, u32 delay) * * Return: destination sideband queue PHY device. */ -static enum ice_sbq_msg_dev ice_ptp_get_dest_dev_e825(struct ice_hw *hw, - u8 port) +static enum ice_sbq_dev_id ice_ptp_get_dest_dev_e825(struct ice_hw *hw, + u8 port) { - /* On a single complex E825, PHY 0 is always destination device phy_0 + u8 curr_phy, tgt_phy; + + tgt_phy = port >= hw->ptp.ports_per_phy; + curr_phy = hw->lane_num >= hw->ptp.ports_per_phy; + /* In the driver, lanes 4..7 are in fact 0..3 on a second PHY. + * On a single complex E825C, PHY 0 is always destination device phy_0 * and PHY 1 is phy_0_peer. + * On dual complex E825C, device phy_0 points to PHY on a current + * complex and phy_0_peer to PHY on a different complex. 
*/ - if (port >= hw->ptp.ports_per_phy) - return eth56g_phy_1; + if ((!ice_is_dual(hw) && tgt_phy == 1) || + (ice_is_dual(hw) && tgt_phy != curr_phy)) + return ice_sbq_dev_phy_0_peer; else - return eth56g_phy_0; + return ice_sbq_dev_phy_0; } /** @@ -2417,6 +2432,7 @@ int ice_phy_cfg_intr_eth56g(struct ice_hw *hw, u8 port, bool ena, u8 threshold) static int ice_read_phy_and_phc_time_eth56g(struct ice_hw *hw, u8 port, u64 *phy_time, u64 *phc_time) { + struct ice_pf *pf = container_of(hw, struct ice_pf, hw); u64 tx_time, rx_time; u32 zo, lo; u8 tmr_idx; @@ -2436,8 +2452,13 @@ static int ice_read_phy_and_phc_time_eth56g(struct ice_hw *hw, u8 port, ice_ptp_exec_tmr_cmd(hw); /* Read the captured PHC time from the shadow time registers */ - zo = rd32(hw, GLTSYN_SHTIME_0(tmr_idx)); - lo = rd32(hw, GLTSYN_SHTIME_L(tmr_idx)); + if (ice_is_primary(hw)) { + zo = rd32(hw, GLTSYN_SHTIME_0(tmr_idx)); + lo = rd32(hw, GLTSYN_SHTIME_L(tmr_idx)); + } else { + zo = rd32(ice_get_primary_hw(pf), GLTSYN_SHTIME_0(tmr_idx)); + lo = rd32(ice_get_primary_hw(pf), GLTSYN_SHTIME_L(tmr_idx)); + } *phc_time = (u64)lo << 32 | zo; /* Read the captured PHY time from the PHY shadow registers */ @@ -2574,6 +2595,7 @@ int ice_stop_phy_timer_eth56g(struct ice_hw *hw, u8 port, bool soft_reset) */ int ice_start_phy_timer_eth56g(struct ice_hw *hw, u8 port) { + struct ice_pf *pf = container_of(hw, struct ice_pf, hw); u32 lo, hi; u64 incval; u8 tmr_idx; @@ -2599,8 +2621,13 @@ int ice_start_phy_timer_eth56g(struct ice_hw *hw, u8 port) if (err) return err; - lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx)); - hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx)); + if (ice_is_primary(hw)) { + lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx)); + hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx)); + } else { + lo = rd32(ice_get_primary_hw(pf), GLTSYN_INCVAL_L(tmr_idx)); + hi = rd32(ice_get_primary_hw(pf), GLTSYN_INCVAL_H(tmr_idx)); + } incval = (u64)hi << 32 | lo; err = ice_write_40b_ptp_reg_eth56g(hw, port, PHY_REG_TIMETUS_L, incval); @@ -2631,25 +2658,6 @@ int ice_start_phy_timer_eth56g(struct ice_hw *hw, u8 port) } /** - * ice_sb_access_ena_eth56g - Enable SB devices (PHY and others) access - * @hw: pointer to HW struct - * @enable: Enable or disable access - * - * Enable sideband devices (PHY and others) access. 
- */ -static void ice_sb_access_ena_eth56g(struct ice_hw *hw, bool enable) -{ - u32 val = rd32(hw, PF_SB_REM_DEV_CTL); - - if (enable) - val |= BIT(eth56g_phy_0) | BIT(cgu) | BIT(eth56g_phy_1); - else - val &= ~(BIT(eth56g_phy_0) | BIT(cgu) | BIT(eth56g_phy_1)); - - wr32(hw, PF_SB_REM_DEV_CTL, val); -} - -/** * ice_ptp_init_phc_e825 - Perform E825 specific PHC initialization * @hw: pointer to HW struct * @@ -2659,8 +2667,6 @@ static void ice_sb_access_ena_eth56g(struct ice_hw *hw, bool enable) */ static int ice_ptp_init_phc_e825(struct ice_hw *hw) { - ice_sb_access_ena_eth56g(hw, true); - /* Initialize the Clock Generation Unit */ return ice_init_cgu_e82x(hw); } @@ -2747,8 +2753,6 @@ static void ice_ptp_init_phy_e825(struct ice_hw *hw) params->num_phys = 2; ptp->ports_per_phy = 4; ptp->num_lports = params->num_phys * ptp->ports_per_phy; - - ice_sb_access_ena_eth56g(hw, true); } /* E822 family functions @@ -2781,7 +2785,7 @@ static void ice_fill_phy_msg_e82x(struct ice_hw *hw, msg->msg_addr_high = P_Q1_H(P_4_BASE + offset, phy_port); } - msg->dest_dev = rmn_0; + msg->dest_dev = ice_sbq_dev_phy_0; } /** @@ -3104,7 +3108,7 @@ static int ice_fill_quad_msg_e82x(struct ice_hw *hw, if (quad >= ICE_GET_QUAD_NUM(hw->ptp.num_lports)) return -EINVAL; - msg->dest_dev = rmn_0; + msg->dest_dev = ice_sbq_dev_phy_0; if (!(quad % ICE_GET_QUAD_NUM(hw->ptp.ports_per_phy))) addr = Q_0_BASE + offset; @@ -4823,7 +4827,7 @@ static int ice_read_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 *val) msg.msg_addr_low = lower_16_bits(addr); msg.msg_addr_high = upper_16_bits(addr); msg.opcode = ice_sbq_msg_rd; - msg.dest_dev = rmn_0; + msg.dest_dev = ice_sbq_dev_phy_0; err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD); if (err) { @@ -4853,7 +4857,7 @@ static int ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val) msg.msg_addr_low = lower_16_bits(addr); msg.msg_addr_high = upper_16_bits(addr); msg.opcode = ice_sbq_msg_wr; - msg.dest_dev = rmn_0; + msg.dest_dev = ice_sbq_dev_phy_0; msg.data = val; err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD); diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h index e5925ccc2613..83f20fa7ace7 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h @@ -444,11 +444,6 @@ static inline u64 ice_get_base_incval(struct ice_hw *hw) } } -static inline bool ice_is_dual(struct ice_hw *hw) -{ - return !!(hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_DUAL_M); -} - #define PFTSYN_SEM_BYTES 4 #define ICE_PTP_CLOCK_INDEX_0 0x00 diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c index fb7a1b9a4313..cb08746556a6 100644 --- a/drivers/net/ethernet/intel/ice/ice_repr.c +++ b/drivers/net/ethernet/intel/ice/ice_repr.c @@ -219,7 +219,8 @@ ice_repr_setup_tc_cls_flower(struct ice_repr *repr, { switch (flower->command) { case FLOW_CLS_REPLACE: - return ice_add_cls_flower(repr->netdev, repr->src_vsi, flower); + return ice_add_cls_flower(repr->netdev, repr->src_vsi, flower, + true); case FLOW_CLS_DESTROY: return ice_del_cls_flower(repr->src_vsi, flower); default: @@ -336,6 +337,7 @@ void ice_repr_destroy(struct ice_repr *repr) static void ice_repr_rem_vf(struct ice_repr *repr) { ice_eswitch_decfg_vsi(repr->src_vsi, repr->parent_mac); + ice_pass_vf_tx_lldp(repr->src_vsi, true); unregister_netdev(repr->netdev); ice_devlink_destroy_vf_port(repr->vf); ice_virtchnl_set_dflt_ops(repr->vf); @@ -417,6 +419,10 @@ static int ice_repr_add_vf(struct ice_repr *repr) if (err) 
goto err_netdev; + err = ice_drop_vf_tx_lldp(repr->src_vsi, true); + if (err) + goto err_drop_lldp; + err = ice_eswitch_cfg_vsi(repr->src_vsi, repr->parent_mac); if (err) goto err_cfg_vsi; @@ -429,6 +435,8 @@ static int ice_repr_add_vf(struct ice_repr *repr) return 0; err_cfg_vsi: + ice_pass_vf_tx_lldp(repr->src_vsi, true); +err_drop_lldp: unregister_netdev(repr->netdev); err_netdev: ice_devlink_destroy_vf_port(vf); diff --git a/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h b/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h index 3b0054faf70c..183dd5457d6a 100644 --- a/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h @@ -46,13 +46,10 @@ struct ice_sbq_evt_desc { u8 data[24]; }; -enum ice_sbq_msg_dev { - eth56g_phy_0 = 0x02, - rmn_0 = 0x02, - rmn_1 = 0x03, - rmn_2 = 0x04, - cgu = 0x06, - eth56g_phy_1 = 0x0D, +enum ice_sbq_dev_id { + ice_sbq_dev_phy_0 = 0x02, + ice_sbq_dev_cgu = 0x06, + ice_sbq_dev_phy_0_peer = 0x0D, }; enum ice_sbq_msg_opcode { diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c index f1648cf103b7..0e4dc1a5cff0 100644 --- a/drivers/net/ethernet/intel/ice/ice_sriov.c +++ b/drivers/net/ethernet/intel/ice/ice_sriov.c @@ -63,6 +63,7 @@ static void ice_free_vf_res(struct ice_vf *vf) if (vf->lan_vsi_idx != ICE_NO_VSI) { ice_vf_vsi_release(vf); vf->num_mac = 0; + vf->num_mac_lldp = 0; } last_vector_idx = vf->first_vector_idx + vf->num_msix - 1; @@ -1402,6 +1403,9 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted) mutex_lock(&vf->cfg_lock); + while (!trusted && vf->num_mac_lldp) + ice_vf_update_mac_lldp_num(vf, ice_get_vf_vsi(vf), false); + vf->trusted = trusted; ice_reset_vf(vf, ICE_VF_RESET_NOTIFY); dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n", diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 4a91e0aaf0a5..9d9a7edd3618 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -3146,7 +3146,7 @@ ice_add_update_vsi_list(struct ice_hw *hw, u16 vsi_handle_arr[2]; /* A rule already exists with the new VSI being added */ - if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id) + if (cur_fltr->vsi_handle == new_fltr->vsi_handle) return -EEXIST; vsi_handle_arr[0] = cur_fltr->vsi_handle; @@ -5978,7 +5978,7 @@ ice_adv_add_update_vsi_list(struct ice_hw *hw, /* A rule already exists with the new VSI being added */ if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map)) - return 0; + return -EEXIST; /* Update the previously created VSI list set with * the new VSI ID passed in diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c index ea39b999a0d0..fb9ea7f8ef44 100644 --- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c @@ -12,14 +12,11 @@ /** * ice_tc_count_lkups - determine lookup count for switch filter * @flags: TC-flower flags - * @headers: Pointer to TC flower filter header structure * @fltr: Pointer to outer TC filter structure * - * Determine lookup count based on TC flower input for switch filter. + * Return: lookup count based on TC flower input for a switch filter. 
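/*
 * [Editor's sketch, not from the patch] ice_tc_count_lkups() documented
 * above follows the common "one lookup per matched header group, plus a
 * mandatory metadata lookup" pattern. Simplified illustration; the flag
 * names below are hypothetical stand-ins for the ICE_TC_FLWR_FIELD_* bits:
 */
#define EXAMPLE_FLWR_FIELD_ETH_TYPE	BIT(0)	/* hypothetical */
#define EXAMPLE_FLWR_FIELD_IPV4_ADDRS	BIT(1)	/* hypothetical */

static int example_count_lkups(u32 flags)
{
	int lkups_cnt = 1;	/* 0th lookup always carries rule metadata */

	if (flags & EXAMPLE_FLWR_FIELD_ETH_TYPE)
		lkups_cnt++;
	if (flags & EXAMPLE_FLWR_FIELD_IPV4_ADDRS)
		lkups_cnt++;

	return lkups_cnt;
}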
*/ -static int -ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers, - struct ice_tc_flower_fltr *fltr) +static int ice_tc_count_lkups(u32 flags, struct ice_tc_flower_fltr *fltr) { int lkups_cnt = 1; /* 0th lookup is metadata */ @@ -684,26 +681,26 @@ static int ice_tc_setup_action(struct net_device *filter_dev, fltr->action.fltr_act = action; if (ice_is_port_repr_netdev(filter_dev) && - ice_is_port_repr_netdev(target_dev)) { + ice_is_port_repr_netdev(target_dev) && + fltr->direction == ICE_ESWITCH_FLTR_EGRESS) { repr = ice_netdev_to_repr(target_dev); fltr->dest_vsi = repr->src_vsi; - fltr->direction = ICE_ESWITCH_FLTR_EGRESS; } else if (ice_is_port_repr_netdev(filter_dev) && - ice_tc_is_dev_uplink(target_dev)) { + ice_tc_is_dev_uplink(target_dev) && + fltr->direction == ICE_ESWITCH_FLTR_EGRESS) { repr = ice_netdev_to_repr(filter_dev); fltr->dest_vsi = repr->src_vsi->back->eswitch.uplink_vsi; - fltr->direction = ICE_ESWITCH_FLTR_EGRESS; } else if (ice_tc_is_dev_uplink(filter_dev) && - ice_is_port_repr_netdev(target_dev)) { + ice_is_port_repr_netdev(target_dev) && + fltr->direction == ICE_ESWITCH_FLTR_INGRESS) { repr = ice_netdev_to_repr(target_dev); fltr->dest_vsi = repr->src_vsi; - fltr->direction = ICE_ESWITCH_FLTR_INGRESS; } else { NL_SET_ERR_MSG_MOD(fltr->extack, - "Unsupported netdevice in switchdev mode"); + "The action is not supported for this netdevice"); return -EINVAL; } @@ -716,13 +713,11 @@ ice_tc_setup_drop_action(struct net_device *filter_dev, { fltr->action.fltr_act = ICE_DROP_PACKET; - if (ice_is_port_repr_netdev(filter_dev)) { - fltr->direction = ICE_ESWITCH_FLTR_EGRESS; - } else if (ice_tc_is_dev_uplink(filter_dev)) { - fltr->direction = ICE_ESWITCH_FLTR_INGRESS; - } else { + if (!ice_tc_is_dev_uplink(filter_dev) && + !(ice_is_port_repr_netdev(filter_dev) && + fltr->direction == ICE_ESWITCH_FLTR_INGRESS)) { NL_SET_ERR_MSG_MOD(fltr->extack, - "Unsupported netdevice in switchdev mode"); + "The action is not supported for this netdevice"); return -EINVAL; } @@ -767,10 +762,157 @@ static int ice_eswitch_tc_parse_action(struct net_device *filter_dev, return 0; } +static bool ice_is_fltr_lldp(struct ice_tc_flower_fltr *fltr) +{ + return fltr->outer_headers.l2_key.n_proto == htons(ETH_P_LLDP); +} + +static bool ice_is_fltr_pf_tx_lldp(struct ice_tc_flower_fltr *fltr) +{ + struct ice_vsi *vsi = fltr->src_vsi, *uplink; + + if (!ice_is_switchdev_running(vsi->back)) + return false; + + uplink = vsi->back->eswitch.uplink_vsi; + return vsi == uplink && fltr->action.fltr_act == ICE_DROP_PACKET && + ice_is_fltr_lldp(fltr) && + fltr->direction == ICE_ESWITCH_FLTR_EGRESS && + fltr->flags == ICE_TC_FLWR_FIELD_ETH_TYPE_ID; +} + +static bool ice_is_fltr_vf_tx_lldp(struct ice_tc_flower_fltr *fltr) +{ + struct ice_vsi *vsi = fltr->src_vsi, *uplink; + + uplink = vsi->back->eswitch.uplink_vsi; + return fltr->src_vsi->type == ICE_VSI_VF && ice_is_fltr_lldp(fltr) && + fltr->direction == ICE_ESWITCH_FLTR_EGRESS && + fltr->dest_vsi == uplink; +} + +static struct ice_tc_flower_fltr * +ice_find_pf_tx_lldp_fltr(struct ice_pf *pf) +{ + struct ice_tc_flower_fltr *fltr; + + hlist_for_each_entry(fltr, &pf->tc_flower_fltr_list, tc_flower_node) + if (ice_is_fltr_pf_tx_lldp(fltr)) + return fltr; + + return NULL; +} + +static bool ice_any_vf_lldp_tx_ena(struct ice_pf *pf) +{ + struct ice_vf *vf; + unsigned int bkt; + + ice_for_each_vf(pf, bkt, vf) + if (vf->lldp_tx_ena) + return true; + + return false; +} + +int ice_pass_vf_tx_lldp(struct ice_vsi *vsi, bool deinit) +{ + struct 
ice_rule_query_data remove_entry = { + .rid = vsi->vf->lldp_recipe_id, + .rule_id = vsi->vf->lldp_rule_id, + .vsi_handle = vsi->idx, + }; + struct ice_pf *pf = vsi->back; + int err; + + if (vsi->vf->lldp_tx_ena) + return 0; + + if (!deinit && !ice_find_pf_tx_lldp_fltr(vsi->back)) + return -EINVAL; + + if (!deinit && ice_any_vf_lldp_tx_ena(pf)) + return -EINVAL; + + err = ice_rem_adv_rule_by_id(&pf->hw, &remove_entry); + if (!err) + vsi->vf->lldp_tx_ena = true; + + return err; +} + +int ice_drop_vf_tx_lldp(struct ice_vsi *vsi, bool init) +{ + struct ice_rule_query_data rule_added; + struct ice_adv_rule_info rinfo = { + .priority = 7, + .src_vsi = vsi->idx, + .sw_act = { + .src = vsi->idx, + .flag = ICE_FLTR_TX, + .fltr_act = ICE_DROP_PACKET, + .vsi_handle = vsi->idx, + }, + .flags_info.act_valid = true, + }; + struct ice_adv_lkup_elem list[3]; + struct ice_pf *pf = vsi->back; + int err; + + if (!init && !vsi->vf->lldp_tx_ena) + return 0; + + memset(list, 0, sizeof(list)); + ice_rule_add_direction_metadata(&list[0]); + ice_rule_add_src_vsi_metadata(&list[1]); + list[2].type = ICE_ETYPE_OL; + list[2].h_u.ethertype.ethtype_id = htons(ETH_P_LLDP); + list[2].m_u.ethertype.ethtype_id = htons(0xFFFF); + + err = ice_add_adv_rule(&pf->hw, list, ARRAY_SIZE(list), &rinfo, + &rule_added); + if (err) { + dev_err(&pf->pdev->dev, + "Failed to add an LLDP rule to VSI 0x%X: %d\n", + vsi->idx, err); + } else { + vsi->vf->lldp_recipe_id = rule_added.rid; + vsi->vf->lldp_rule_id = rule_added.rule_id; + vsi->vf->lldp_tx_ena = false; + } + + return err; +} + +static void ice_handle_add_pf_lldp_drop_rule(struct ice_vsi *vsi) +{ + struct ice_tc_flower_fltr *fltr; + struct ice_pf *pf = vsi->back; + + hlist_for_each_entry(fltr, &pf->tc_flower_fltr_list, tc_flower_node) { + if (!ice_is_fltr_vf_tx_lldp(fltr)) + continue; + ice_pass_vf_tx_lldp(fltr->src_vsi, true); + break; + } +} + +static void ice_handle_del_pf_lldp_drop_rule(struct ice_pf *pf) +{ + int i; + + /* Make the VF LLDP fwd to uplink rule dormant */ + ice_for_each_vsi(pf, i) { + struct ice_vsi *vf_vsi = pf->vsi[i]; + + if (vf_vsi && vf_vsi->type == ICE_VSI_VF) + ice_drop_vf_tx_lldp(vf_vsi, false); + } +} + static int ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) { - struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers; struct ice_adv_rule_info rule_info = { 0 }; struct ice_rule_query_data rule_added; struct ice_hw *hw = &vsi->back->hw; @@ -785,7 +927,10 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) return -EOPNOTSUPP; } - lkups_cnt = ice_tc_count_lkups(flags, headers, fltr); + if (ice_is_fltr_vf_tx_lldp(fltr)) + return ice_pass_vf_tx_lldp(vsi, false); + + lkups_cnt = ice_tc_count_lkups(flags, fltr); list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC); if (!list) return -ENOMEM; @@ -814,6 +959,11 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) rule_info.sw_act.src = hw->pf_id; rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE; } else if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS && + !fltr->dest_vsi && vsi == vsi->back->eswitch.uplink_vsi) { + /* PF to Uplink */ + rule_info.sw_act.flag |= ICE_FLTR_TX; + rule_info.sw_act.src = vsi->idx; + } else if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS && fltr->dest_vsi == vsi->back->eswitch.uplink_vsi) { /* VF to Uplink */ rule_info.sw_act.flag |= ICE_FLTR_TX; @@ -846,11 +996,17 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to 
add filter because it already exist"); ret = -EINVAL; goto exit; + } else if (ret == -ENOSPC) { + NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter: insufficient space available."); + goto exit; } else if (ret) { NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter due to error"); goto exit; } + if (ice_is_fltr_pf_tx_lldp(fltr)) + ice_handle_add_pf_lldp_drop_rule(vsi); + /* store the output params, which are needed later for removing * advanced switch filter */ @@ -985,7 +1141,6 @@ static int ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *tc_fltr) { - struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers; struct ice_adv_rule_info rule_info = {0}; struct ice_rule_query_data rule_added; struct ice_adv_lkup_elem *list; @@ -1021,7 +1176,7 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi, return PTR_ERR(dest_vsi); } - lkups_cnt = ice_tc_count_lkups(flags, headers, tc_fltr); + lkups_cnt = ice_tc_count_lkups(flags, tc_fltr); list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC); if (!list) return -ENOMEM; @@ -1056,8 +1211,13 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi, tc_fltr->action.fwd.q.hw_queue, lkups_cnt); break; case ICE_DROP_PACKET: - rule_info.sw_act.flag |= ICE_FLTR_RX; - rule_info.sw_act.src = hw->pf_id; + if (tc_fltr->direction == ICE_ESWITCH_FLTR_EGRESS) { + rule_info.sw_act.flag |= ICE_FLTR_TX; + rule_info.sw_act.src = vsi->idx; + } else { + rule_info.sw_act.flag |= ICE_FLTR_RX; + rule_info.sw_act.src = hw->pf_id; + } rule_info.priority = ICE_SWITCH_FLTR_PRIO_VSI; break; default: @@ -1071,6 +1231,10 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi, "Unable to add filter because it already exist"); ret = -EINVAL; goto exit; + } else if (ret == -ENOSPC) { + NL_SET_ERR_MSG_MOD(tc_fltr->extack, + "Unable to add filter: insufficient space available."); + goto exit; } else if (ret) { NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unable to add filter due to error"); @@ -1463,11 +1627,16 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule, * @filter_dev: Pointer to device on which filter is being added * @f: Pointer to struct flow_cls_offload * @fltr: Pointer to filter structure + * @ingress: if the rule is added to an ingress block + * + * Return: 0 if the flower was parsed successfully, -EINVAL if the flower + * cannot be parsed, -EOPNOTSUPP if such filter cannot be configured + * for the given VSI. */ static int ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi, struct flow_cls_offload *f, - struct ice_tc_flower_fltr *fltr) + struct ice_tc_flower_fltr *fltr, bool ingress) { struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers; struct flow_rule *rule = flow_cls_offload_flow_rule(f); @@ -1551,6 +1720,20 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi, fltr->flags |= ICE_TC_FLWR_FIELD_ETH_TYPE_ID; } + if (!ingress) { + bool switchdev = + ice_is_eswitch_mode_switchdev(vsi->back); + + if (switchdev != (n_proto_key == ETH_P_LLDP)) { + NL_SET_ERR_MSG_FMT_MOD(fltr->extack, + "%sLLDP filtering is not supported on egress in %s mode", + switchdev ? "Non-" : "", + switchdev ? 
"switchdev" : + "legacy"); + return -EOPNOTSUPP; + } + } + headers->l2_key.n_proto = cpu_to_be16(n_proto_key); headers->l2_mask.n_proto = cpu_to_be16(n_proto_mask); headers->l3_key.ip_proto = match.key->ip_proto; @@ -1726,6 +1909,14 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi, return -EINVAL; } } + + /* Ingress filter on representor results in an egress filter in HW + * and vice versa + */ + ingress = ice_is_port_repr_netdev(filter_dev) ? !ingress : ingress; + fltr->direction = ingress ? ICE_ESWITCH_FLTR_INGRESS : + ICE_ESWITCH_FLTR_EGRESS; + return 0; } @@ -1939,6 +2130,12 @@ static int ice_del_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) struct ice_pf *pf = vsi->back; int err; + if (ice_is_fltr_pf_tx_lldp(fltr)) + ice_handle_del_pf_lldp_drop_rule(pf); + + if (ice_is_fltr_vf_tx_lldp(fltr)) + return ice_drop_vf_tx_lldp(vsi, false); + rule_rem.rid = fltr->rid; rule_rem.rule_id = fltr->rule_id; rule_rem.vsi_handle = fltr->dest_vsi_handle; @@ -1975,14 +2172,18 @@ static int ice_del_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) * @vsi: Pointer to VSI * @f: Pointer to flower offload structure * @__fltr: Pointer to struct ice_tc_flower_fltr + * @ingress: if the rule is added to an ingress block * * This function parses TC-flower input fields, parses action, * and adds a filter. + * + * Return: 0 if the filter was successfully added, + * negative error code otherwise. */ static int ice_add_tc_fltr(struct net_device *netdev, struct ice_vsi *vsi, struct flow_cls_offload *f, - struct ice_tc_flower_fltr **__fltr) + struct ice_tc_flower_fltr **__fltr, bool ingress) { struct ice_tc_flower_fltr *fltr; int err; @@ -1999,7 +2200,7 @@ ice_add_tc_fltr(struct net_device *netdev, struct ice_vsi *vsi, fltr->src_vsi = vsi; INIT_HLIST_NODE(&fltr->tc_flower_node); - err = ice_parse_cls_flower(netdev, vsi, f, fltr); + err = ice_parse_cls_flower(netdev, vsi, f, fltr, ingress); if (err < 0) goto err; @@ -2042,10 +2243,13 @@ ice_find_tc_flower_fltr(struct ice_pf *pf, unsigned long cookie) * @netdev: Pointer to filter device * @vsi: Pointer to VSI * @cls_flower: Pointer to flower offload structure + * @ingress: if the rule is added to an ingress block + * + * Return: 0 if the flower was successfully added, + * negative error code otherwise. 
*/ -int -ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi, - struct flow_cls_offload *cls_flower) +int ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi, + struct flow_cls_offload *cls_flower, bool ingress) { struct netlink_ext_ack *extack = cls_flower->common.extack; struct net_device *vsi_netdev = vsi->netdev; @@ -2080,7 +2284,7 @@ ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi, } /* prep and add TC-flower filter in HW */ - err = ice_add_tc_fltr(netdev, vsi, cls_flower, &fltr); + err = ice_add_tc_fltr(netdev, vsi, cls_flower, &fltr, ingress); if (err) return err; diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.h b/drivers/net/ethernet/intel/ice/ice_tc_lib.h index d84f153517ec..8a3ab2f22af9 100644 --- a/drivers/net/ethernet/intel/ice/ice_tc_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.h @@ -211,13 +211,14 @@ static inline int ice_chnl_dmac_fltr_cnt(struct ice_pf *pf) } struct ice_vsi *ice_locate_vsi_using_queue(struct ice_vsi *vsi, int queue); -int -ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi, - struct flow_cls_offload *cls_flower); -int -ice_del_cls_flower(struct ice_vsi *vsi, struct flow_cls_offload *cls_flower); +int ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi, + struct flow_cls_offload *cls_flower, bool ingress); +int ice_del_cls_flower(struct ice_vsi *vsi, + struct flow_cls_offload *cls_flower); void ice_replay_tc_fltrs(struct ice_pf *pf); bool ice_is_tunnel_supported(struct net_device *dev); +int ice_drop_vf_tx_lldp(struct ice_vsi *vsi, bool init); +int ice_pass_vf_tx_lldp(struct ice_vsi *vsi, bool deinit); static inline bool ice_is_forward_action(enum ice_sw_fwd_act_type fltr_act) { diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index 1e4f6f6ee449..0e5107fe62ad 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -2440,19 +2440,20 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring) /* allow CONTROL frames egress from main VSI if FW LLDP disabled */ eth = (struct ethhdr *)skb_mac_header(skb); - if (unlikely((skb->priority == TC_PRIO_CONTROL || - eth->h_proto == htons(ETH_P_LLDP)) && - vsi->type == ICE_VSI_PF && - vsi->port_info->qos_cfg.is_sw_lldp)) - offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX | - ICE_TX_CTX_DESC_SWTCH_UPLINK << - ICE_TXD_CTX_QW1_CMD_S); - ice_tstamp(tx_ring, skb, first, &offload); if ((ice_is_switchdev_running(vsi->back) || ice_lag_is_switchdev_running(vsi->back)) && vsi->type != ICE_VSI_SF) ice_eswitch_set_target_vsi(skb, &offload); + else if (unlikely((skb->priority == TC_PRIO_CONTROL || + eth->h_proto == htons(ETH_P_LLDP)) && + vsi->type == ICE_VSI_PF && + vsi->port_info->qos_cfg.is_sw_lldp)) + offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX | + ICE_TX_CTX_DESC_SWTCH_UPLINK << + ICE_TXD_CTX_QW1_CMD_S); + + ice_tstamp(tx_ring, skb, first, &offload); if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) { struct ice_tx_ctx_desc *cdesc; diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index 0aab21113cc4..3d68f465952d 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -19,6 +19,7 @@ #include "ice_vlan_mode.h" #include "ice_fwlog.h" #include <linux/wait.h> +#include <net/dscp.h> static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc) { @@ -695,7 +696,6 @@ struct ice_dcb_app_priority_table { #define ICE_MAX_USER_PRIORITY 8 #define 
ICE_DCBX_MAX_APPS 64 -#define ICE_DSCP_NUM_VAL 64 #define ICE_LLDPDU_SIZE 1500 #define ICE_TLV_STATUS_OPER 0x1 #define ICE_TLV_STATUS_SYNC 0x2 @@ -718,9 +718,9 @@ struct ice_dcbx_cfg { u8 pfc_mode; struct ice_dcb_app_priority_table app[ICE_DCBX_MAX_APPS]; /* when DSCP mapping defined by user set its bit to 1 */ - DECLARE_BITMAP(dscp_mapped, ICE_DSCP_NUM_VAL); + DECLARE_BITMAP(dscp_mapped, DSCP_MAX); /* array holding DSCP -> UP/TC values for DSCP L3 QoS mode */ - u8 dscp_map[ICE_DSCP_NUM_VAL]; + u8 dscp_map[DSCP_MAX]; u8 dcbx_mode; #define ICE_DCBX_MODE_CEE 0x1 #define ICE_DCBX_MODE_IEEE 0x2 @@ -970,6 +970,7 @@ struct ice_hw { u8 intrl_gran; struct ice_ptp_hw ptp; + s8 lane_num; /* Active package version (currently active) */ struct ice_pkg_ver active_pkg_ver; diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c index 815ad0bfe832..48cd533e93b7 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c @@ -226,6 +226,7 @@ static void ice_vf_clear_counters(struct ice_vf *vf) vsi->num_vlan = 0; vf->num_mac = 0; + vf->num_mac_lldp = 0; memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events)); memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events)); } @@ -1401,3 +1402,28 @@ struct ice_vsi *ice_get_vf_ctrl_vsi(struct ice_pf *pf, struct ice_vsi *vsi) rcu_read_unlock(); return ctrl_vsi; } + +/** + * ice_vf_update_mac_lldp_num - update the VF's number of LLDP addresses + * @vf: a VF to add the address to + * @vsi: the corresponding VSI + * @incr: is the rule added or removed + */ +void ice_vf_update_mac_lldp_num(struct ice_vf *vf, struct ice_vsi *vsi, + bool incr) +{ + bool lldp_by_fw = test_bit(ICE_FLAG_FW_LLDP_AGENT, vsi->back->flags); + bool was_ena = ice_vf_is_lldp_ena(vf) && !lldp_by_fw; + bool is_ena; + + if (WARN_ON(!vsi)) { + vf->num_mac_lldp = 0; + return; + } + + vf->num_mac_lldp += incr ? 1 : -1; + is_ena = ice_vf_is_lldp_ena(vf) && !lldp_by_fw; + + if (was_ena != is_ena) + ice_vsi_cfg_sw_lldp(vsi, false, is_ena); +} diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h index 799b2c1f1184..482f4285fd35 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h @@ -124,6 +124,7 @@ struct ice_vf { u8 spoofchk:1; u8 link_forced:1; u8 link_up:1; /* only valid if VF link is forced */ + u8 lldp_tx_ena:1; u32 ptp_caps; @@ -134,6 +135,7 @@ struct ice_vf { unsigned long vf_caps; /* VF's adv. capabilities */ u8 num_req_qs; /* num of queue pairs requested by VF */ u16 num_mac; + u16 num_mac_lldp; u16 num_vf_qs; /* num of queue configured per VF */ u8 vlan_strip_ena; /* Outer and Inner VLAN strip enable */ #define ICE_INNER_VLAN_STRIP_ENA BIT(0) @@ -149,6 +151,9 @@ struct ice_vf { /* devlink port data */ struct devlink_port devlink_port; + u16 lldp_recipe_id; + u16 lldp_rule_id; + u16 num_msix; /* num of MSI-X configured on this VF */ struct ice_vf_qs_bw qs_bw[ICE_MAX_RSS_QS_PER_VF]; }; @@ -180,6 +185,11 @@ static inline u16 ice_vf_get_port_vlan_tpid(struct ice_vf *vf) return vf->port_vlan_info.tpid; } +static inline bool ice_vf_is_lldp_ena(struct ice_vf *vf) +{ + return vf->num_mac_lldp && vf->trusted; +} + /* VF Hash Table access functions * * These functions provide abstraction for interacting with the VF hash table. 
@@ -245,6 +255,8 @@ ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m); int ice_reset_vf(struct ice_vf *vf, u32 flags); void ice_reset_all_vfs(struct ice_pf *pf); struct ice_vsi *ice_get_vf_ctrl_vsi(struct ice_pf *pf, struct ice_vsi *vsi); +void ice_vf_update_mac_lldp_num(struct ice_vf *vf, struct ice_vsi *vsi, + bool incr); #else /* CONFIG_PCI_IOV */ static inline struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id) { diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c index 6446d0fcc052..eeeb9968e477 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c @@ -2266,6 +2266,51 @@ ice_vfhw_mac_add(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr) } /** + * ice_is_mc_lldp_eth_addr - check if the given MAC is a multicast LLDP address + * @mac: address to check + * + * Return: true if the address is one of the three possible LLDP multicast + * addresses, false otherwise. + */ +static bool ice_is_mc_lldp_eth_addr(const u8 *mac) +{ + const u8 lldp_mac_base[] = {0x01, 0x80, 0xc2, 0x00, 0x00}; + + if (memcmp(mac, lldp_mac_base, sizeof(lldp_mac_base))) + return false; + + return (mac[5] == 0x0e || mac[5] == 0x03 || mac[5] == 0x00); +} + +/** + * ice_vc_can_add_mac - check if the VF is allowed to add a given MAC + * @vf: a VF to add the address to + * @mac: address to check + * + * Return: true if the VF is allowed to add such MAC address, false otherwise. + */ +static bool ice_vc_can_add_mac(const struct ice_vf *vf, const u8 *mac) +{ + struct device *dev = ice_pf_to_dev(vf->pf); + + if (is_unicast_ether_addr(mac) && + !ice_can_vf_change_mac((struct ice_vf *)vf)) { + dev_err(dev, + "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n"); + return false; + } + + if (!vf->trusted && ice_is_mc_lldp_eth_addr(mac)) { + dev_warn(dev, + "An untrusted VF %u is attempting to configure an LLDP multicast address\n", + vf->vf_id); + return false; + } + + return true; +} + +/** * ice_vc_add_mac_addr - attempt to add the MAC address passed in * @vf: pointer to the VF info * @vsi: pointer to the VF's VSI @@ -2283,10 +2328,8 @@ ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, if (ether_addr_equal(mac_addr, vf->dev_lan_addr)) return 0; - if (is_unicast_ether_addr(mac_addr) && !ice_can_vf_change_mac(vf)) { - dev_err(dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n"); + if (!ice_vc_can_add_mac(vf, mac_addr)) return -EPERM; - } ret = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI); if (ret == -EEXIST) { @@ -2301,6 +2344,8 @@ ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, return ret; } else { vf->num_mac++; + if (ice_is_mc_lldp_eth_addr(mac_addr)) + ice_vf_update_mac_lldp_num(vf, vsi, true); } ice_vfhw_mac_add(vf, vc_ether_addr); @@ -2395,6 +2440,8 @@ ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, ice_vfhw_mac_del(vf, vc_ether_addr); vf->num_mac--; + if (ice_is_mc_lldp_eth_addr(mac_addr)) + ice_vf_update_mac_lldp_num(vf, vsi, false); return 0; } diff --git a/drivers/net/ethernet/intel/idpf/Kconfig b/drivers/net/ethernet/intel/idpf/Kconfig index 1addd663acad..2c359a8551c7 100644 --- a/drivers/net/ethernet/intel/idpf/Kconfig +++ b/drivers/net/ethernet/intel/idpf/Kconfig @@ -4,6 +4,7 @@ config IDPF tristate "Intel(R) Infrastructure Data Path Function Support" depends on PCI_MSI + 
depends on PTP_1588_CLOCK_OPTIONAL select DIMLIB select LIBETH help diff --git a/drivers/net/ethernet/intel/idpf/Makefile b/drivers/net/ethernet/intel/idpf/Makefile index 2ce01a0b5898..83ac5e296382 100644 --- a/drivers/net/ethernet/intel/idpf/Makefile +++ b/drivers/net/ethernet/intel/idpf/Makefile @@ -17,3 +17,6 @@ idpf-y := \ idpf_vf_dev.o idpf-$(CONFIG_IDPF_SINGLEQ) += idpf_singleq_txrx.o + +idpf-$(CONFIG_PTP_1588_CLOCK) += idpf_ptp.o +idpf-$(CONFIG_PTP_1588_CLOCK) += idpf_virtchnl_ptp.o diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h index 70dbf80f3bb7..1e812c3f62f9 100644 --- a/drivers/net/ethernet/intel/idpf/idpf.h +++ b/drivers/net/ethernet/intel/idpf/idpf.h @@ -191,6 +191,7 @@ struct idpf_vport_max_q { * @mb_intr_reg_init: Mailbox interrupt register initialization * @reset_reg_init: Reset register initialization * @trigger_reset: Trigger a reset to occur + * @ptp_reg_init: PTP register initialization */ struct idpf_reg_ops { void (*ctlq_reg_init)(struct idpf_ctlq_create_info *cq); @@ -199,6 +200,7 @@ struct idpf_reg_ops { void (*reset_reg_init)(struct idpf_adapter *adapter); void (*trigger_reset)(struct idpf_adapter *adapter, enum idpf_flags trig_cause); + void (*ptp_reg_init)(const struct idpf_adapter *adapter); }; /** @@ -292,6 +294,9 @@ struct idpf_port_stats { * @port_stats: per port csum, header split, and other offload stats * @link_up: True if link is up * @sw_marker_wq: workqueue for marker packets + * @tx_tstamp_caps: Capabilities negotiated for Tx timestamping + * @tstamp_config: The Tx tstamp config + * @tstamp_task: Tx timestamping task */ struct idpf_vport { u16 num_txq; @@ -336,6 +341,10 @@ struct idpf_vport { bool link_up; wait_queue_head_t sw_marker_wq; + + struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps; + struct kernel_hwtstamp_config tstamp_config; + struct work_struct tstamp_task; }; /** @@ -480,6 +489,13 @@ struct idpf_vport_config { struct idpf_vc_xn_manager; +#define idpf_for_each_vport(adapter, iter) \ + for (struct idpf_vport **__##iter = &(adapter)->vports[0], \ + *iter = (adapter)->max_vports ? *__##iter : NULL; \ + iter; \ + iter = (++__##iter) < &(adapter)->vports[(adapter)->max_vports] ? 
\ + *__##iter : NULL) + /** * struct idpf_adapter - Device data struct generated on probe * @pdev: PCI device struct given on probe @@ -532,6 +548,7 @@ struct idpf_vc_xn_manager; * @vector_lock: Lock to protect vector distribution * @queue_lock: Lock to protect queue distribution * @vc_buf_lock: Lock to protect virtchnl buffer + * @ptp: Storage for PTP-related data */ struct idpf_adapter { struct pci_dev *pdev; @@ -589,6 +606,8 @@ struct idpf_adapter { struct mutex vector_lock; struct mutex queue_lock; struct mutex vc_buf_lock; + + struct idpf_ptp *ptp; }; /** diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h b/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h index e8e046ef2f0d..9642494a67d8 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h +++ b/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h @@ -123,9 +123,12 @@ struct idpf_ctlq_info { /** * enum idpf_mbx_opc - PF/VF mailbox commands * @idpf_mbq_opc_send_msg_to_cp: used by PF or VF to send a message to its CP + * @idpf_mbq_opc_send_msg_to_peer_drv: used by PF or VF to send a message to + * any peer driver */ enum idpf_mbx_opc { idpf_mbq_opc_send_msg_to_cp = 0x0801, + idpf_mbq_opc_send_msg_to_peer_drv = 0x0804, }; /* API supported for control queue management */ diff --git a/drivers/net/ethernet/intel/idpf/idpf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_dev.c index 41e4bd49402a..3fae81f1f988 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_dev.c +++ b/drivers/net/ethernet/intel/idpf/idpf_dev.c @@ -4,6 +4,7 @@ #include "idpf.h" #include "idpf_lan_pf_regs.h" #include "idpf_virtchnl.h" +#include "idpf_ptp.h" #define IDPF_PF_ITR_IDX_SPACING 0x4 @@ -149,6 +150,18 @@ static void idpf_trigger_reset(struct idpf_adapter *adapter, } /** + * idpf_ptp_reg_init - Initialize required registers + * @adapter: Driver specific private structure + * + * Set the bits required for enabling shtime and cmd execution + */ +static void idpf_ptp_reg_init(const struct idpf_adapter *adapter) +{ + adapter->ptp->cmd.shtime_enable_mask = PF_GLTSYN_CMD_SYNC_SHTIME_EN_M; + adapter->ptp->cmd.exec_cmd_mask = PF_GLTSYN_CMD_SYNC_EXEC_CMD_M; +} + +/** * idpf_reg_ops_init - Initialize register API function pointers * @adapter: Driver specific private structure */ @@ -159,6 +172,7 @@ static void idpf_reg_ops_init(struct idpf_adapter *adapter) adapter->dev_ops.reg_ops.mb_intr_reg_init = idpf_mb_intr_reg_init; adapter->dev_ops.reg_ops.reset_reg_init = idpf_reset_reg_init; adapter->dev_ops.reg_ops.trigger_reset = idpf_trigger_reset; + adapter->dev_ops.reg_ops.ptp_reg_init = idpf_ptp_reg_init; } /** diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c index 59b1a1a09996..9bdb309b668e 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c +++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c @@ -2,6 +2,7 @@ /* Copyright (C) 2023 Intel Corporation */ #include "idpf.h" +#include "idpf_ptp.h" /** * idpf_get_rxnfc - command to get RX flow classification rules @@ -1312,6 +1313,71 @@ static int idpf_get_link_ksettings(struct net_device *netdev, return 0; } +/** + * idpf_get_timestamp_filters - Get the supported timestamping mode + * @vport: Virtual port structure + * @info: ethtool timestamping info structure + * + * Get the Tx/Rx timestamp filters. 
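/*
 * [Editor's illustration, not part of the patch] Generic userspace view of
 * the modes idpf_get_timestamp_filters() reports: an application enables
 * hardware timestamping with HWTSTAMP_TX_ON / HWTSTAMP_FILTER_ALL through
 * the SIOCSHWTSTAMP ioctl. The interface name and function name below are
 * placeholders.
 */
#include <linux/net_tstamp.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>

static int example_enable_hwtstamp(int sock)
{
	struct hwtstamp_config cfg = {
		.tx_type = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr = { 0 };

	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	return ioctl(sock, SIOCSHWTSTAMP, &ifr);
}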
+ */ +static void idpf_get_timestamp_filters(const struct idpf_vport *vport, + struct kernel_ethtool_ts_info *info) +{ + info->so_timestamping = SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + info->tx_types = BIT(HWTSTAMP_TX_OFF); + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL); + + if (!vport->tx_tstamp_caps || + vport->adapter->ptp->tx_tstamp_access == IDPF_PTP_NONE) + return; + + info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE; + + info->tx_types |= BIT(HWTSTAMP_TX_ON); +} + +/** + * idpf_get_ts_info - Get device PHC association + * @netdev: network interface device structure + * @info: ethtool timestamping info structure + * + * Return: 0 on success, -errno otherwise. + */ +static int idpf_get_ts_info(struct net_device *netdev, + struct kernel_ethtool_ts_info *info) +{ + struct idpf_netdev_priv *np = netdev_priv(netdev); + struct idpf_vport *vport; + int err = 0; + + if (!mutex_trylock(&np->adapter->vport_ctrl_lock)) + return -EBUSY; + + vport = idpf_netdev_to_vport(netdev); + + if (!vport->adapter->ptp) { + err = -EOPNOTSUPP; + goto unlock; + } + + if (idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_PTP) && + vport->adapter->ptp->clock) { + info->phc_index = ptp_clock_index(vport->adapter->ptp->clock); + idpf_get_timestamp_filters(vport, info); + } else { + pci_dbg(vport->adapter->pdev, "PTP clock not detected\n"); + err = ethtool_op_get_ts_info(netdev, info); + } + +unlock: + mutex_unlock(&np->adapter->vport_ctrl_lock); + + return err; +} + static const struct ethtool_ops idpf_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_USE_ADAPTIVE, @@ -1336,6 +1402,7 @@ static const struct ethtool_ops idpf_ethtool_ops = { .get_ringparam = idpf_get_ringparam, .set_ringparam = idpf_set_ringparam, .get_link_ksettings = idpf_get_link_ksettings, + .get_ts_info = idpf_get_ts_info, }; /** diff --git a/drivers/net/ethernet/intel/idpf/idpf_lan_pf_regs.h b/drivers/net/ethernet/intel/idpf/idpf_lan_pf_regs.h index 24edb8a6ec2e..cc9aa2b6a14a 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_lan_pf_regs.h +++ b/drivers/net/ethernet/intel/idpf/idpf_lan_pf_regs.h @@ -53,6 +53,10 @@ #define PF_FW_ATQH_ATQH_M GENMASK(9, 0) #define PF_FW_ATQT (PF_FW_BASE + 0x24) +/* Timesync registers */ +#define PF_GLTSYN_CMD_SYNC_EXEC_CMD_M GENMASK(1, 0) +#define PF_GLTSYN_CMD_SYNC_SHTIME_EN_M BIT(2) + /* Interrupts */ #define PF_GLINT_BASE 0x08900000 #define PF_GLINT_DYN_CTL(_INT) (PF_GLINT_BASE + ((_INT) * 0x1000)) diff --git a/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h index 8c7f8ef8f1a1..7492d1713243 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h +++ b/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h @@ -282,7 +282,18 @@ struct idpf_flex_tx_tso_ctx_qw { u8 flex; }; -struct idpf_flex_tx_ctx_desc { +union idpf_flex_tx_ctx_desc { + /* DTYPE = IDPF_TX_DESC_DTYPE_CTX (0x01) */ + struct { + __le64 qw0; +#define IDPF_TX_CTX_L2TAG2_M GENMASK_ULL(47, 32) + __le64 qw1; +#define IDPF_TX_CTX_DTYPE_M GENMASK_ULL(3, 0) +#define IDPF_TX_CTX_CMD_M GENMASK_ULL(15, 4) +#define IDPF_TX_CTX_TSYN_REG_M GENMASK_ULL(47, 30) +#define IDPF_TX_CTX_MSS_M GENMASK_ULL(50, 63) + } tsyn; + /* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX (0x05) */ struct { struct idpf_flex_tx_tso_ctx_qw qw0; diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c index 3a033ce19cda..bab12ecb2df5 100644 --- 
a/drivers/net/ethernet/intel/idpf/idpf_lib.c +++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c @@ -3,6 +3,7 @@ #include "idpf.h" #include "idpf_virtchnl.h" +#include "idpf_ptp.h" static const struct net_device_ops idpf_netdev_ops; @@ -144,22 +145,6 @@ static int idpf_mb_intr_req_irq(struct idpf_adapter *adapter) } /** - * idpf_set_mb_vec_id - Set vector index for mailbox - * @adapter: adapter structure to access the vector chunks - * - * The first vector id in the requested vector chunks from the CP is for - * the mailbox - */ -static void idpf_set_mb_vec_id(struct idpf_adapter *adapter) -{ - if (adapter->req_vec_chunks) - adapter->mb_vector.v_idx = - le16_to_cpu(adapter->caps.mailbox_vector_id); - else - adapter->mb_vector.v_idx = 0; -} - -/** * idpf_mb_intr_init - Initialize the mailbox interrupt * @adapter: adapter structure to store the mailbox vector */ @@ -349,7 +334,7 @@ int idpf_intr_req(struct idpf_adapter *adapter) goto free_irq; } - idpf_set_mb_vec_id(adapter); + adapter->mb_vector.v_idx = le16_to_cpu(adapter->caps.mailbox_vector_id); vecids = kcalloc(total_vecs, sizeof(u16), GFP_KERNEL); if (!vecids) { @@ -2344,6 +2329,60 @@ void idpf_free_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem) mem->pa = 0; } +static int idpf_hwtstamp_set(struct net_device *netdev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) +{ + struct idpf_vport *vport; + int err; + + idpf_vport_ctrl_lock(netdev); + vport = idpf_netdev_to_vport(netdev); + + if (!vport->link_up) { + idpf_vport_ctrl_unlock(netdev); + return -EPERM; + } + + if (!idpf_ptp_is_vport_tx_tstamp_ena(vport) && + !idpf_ptp_is_vport_rx_tstamp_ena(vport)) { + idpf_vport_ctrl_unlock(netdev); + return -EOPNOTSUPP; + } + + err = idpf_ptp_set_timestamp_mode(vport, config); + + idpf_vport_ctrl_unlock(netdev); + + return err; +} + +static int idpf_hwtstamp_get(struct net_device *netdev, + struct kernel_hwtstamp_config *config) +{ + struct idpf_vport *vport; + + idpf_vport_ctrl_lock(netdev); + vport = idpf_netdev_to_vport(netdev); + + if (!vport->link_up) { + idpf_vport_ctrl_unlock(netdev); + return -EPERM; + } + + if (!idpf_ptp_is_vport_tx_tstamp_ena(vport) && + !idpf_ptp_is_vport_rx_tstamp_ena(vport)) { + idpf_vport_ctrl_unlock(netdev); + return 0; + } + + *config = vport->tstamp_config; + + idpf_vport_ctrl_unlock(netdev); + + return 0; +} + static const struct net_device_ops idpf_netdev_ops = { .ndo_open = idpf_open, .ndo_stop = idpf_stop, @@ -2356,4 +2395,6 @@ static const struct net_device_ops idpf_netdev_ops = { .ndo_get_stats64 = idpf_get_stats64, .ndo_set_features = idpf_set_features, .ndo_tx_timeout = idpf_tx_timeout, + .ndo_hwtstamp_get = idpf_hwtstamp_get, + .ndo_hwtstamp_set = idpf_hwtstamp_set, }; diff --git a/drivers/net/ethernet/intel/idpf/idpf_main.c b/drivers/net/ethernet/intel/idpf/idpf_main.c index b35713036a54..0efd9c0c7a90 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_main.c +++ b/drivers/net/ethernet/intel/idpf/idpf_main.c @@ -168,6 +168,10 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_free; } + err = pci_enable_ptm(pdev, NULL); + if (err) + pci_dbg(pdev, "PCIe PTM is not supported by PCIe bus/controller\n"); + /* set up for high or low dma */ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); if (err) { @@ -199,9 +203,8 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_serv_wq_alloc; } - adapter->mbx_wq = alloc_workqueue("%s-%s-mbx", - WQ_UNBOUND | WQ_MEM_RECLAIM, 0, - dev_driver_string(dev), + 
adapter->mbx_wq = alloc_workqueue("%s-%s-mbx", WQ_UNBOUND | WQ_HIGHPRI, + 0, dev_driver_string(dev), dev_name(dev)); if (!adapter->mbx_wq) { dev_err(dev, "Failed to allocate mailbox workqueue\n"); diff --git a/drivers/net/ethernet/intel/idpf/idpf_ptp.c b/drivers/net/ethernet/intel/idpf/idpf_ptp.c new file mode 100644 index 000000000000..4f8725c85332 --- /dev/null +++ b/drivers/net/ethernet/intel/idpf/idpf_ptp.c @@ -0,0 +1,873 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2024 Intel Corporation */ + +#include "idpf.h" +#include "idpf_ptp.h" + +/** + * idpf_ptp_get_access - Determine the access type of the PTP features + * @adapter: Driver specific private structure + * @direct: Capability that indicates the direct access + * @mailbox: Capability that indicates the mailbox access + * + * Return: the type of supported access for the PTP feature. + */ +static enum idpf_ptp_access +idpf_ptp_get_access(const struct idpf_adapter *adapter, u32 direct, u32 mailbox) +{ + if (adapter->ptp->caps & direct) + return IDPF_PTP_DIRECT; + else if (adapter->ptp->caps & mailbox) + return IDPF_PTP_MAILBOX; + else + return IDPF_PTP_NONE; +} + +/** + * idpf_ptp_get_features_access - Determine the access type of PTP features + * @adapter: Driver specific private structure + * + * Fulfill the adapter structure with type of the supported PTP features + * access. + */ +void idpf_ptp_get_features_access(const struct idpf_adapter *adapter) +{ + struct idpf_ptp *ptp = adapter->ptp; + u32 direct, mailbox; + + /* Get the device clock time */ + direct = VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME; + mailbox = VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME_MB; + ptp->get_dev_clk_time_access = idpf_ptp_get_access(adapter, + direct, + mailbox); + + /* Set the device clock time */ + direct = VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME; + mailbox = VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME; + ptp->set_dev_clk_time_access = idpf_ptp_get_access(adapter, + direct, + mailbox); + + /* Adjust the device clock time */ + direct = VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK; + mailbox = VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK_MB; + ptp->adj_dev_clk_time_access = idpf_ptp_get_access(adapter, + direct, + mailbox); + + /* Tx timestamping */ + direct = VIRTCHNL2_CAP_PTP_TX_TSTAMPS; + mailbox = VIRTCHNL2_CAP_PTP_TX_TSTAMPS_MB; + ptp->tx_tstamp_access = idpf_ptp_get_access(adapter, + direct, + mailbox); +} + +/** + * idpf_ptp_enable_shtime - Enable shadow time and execute a command + * @adapter: Driver specific private structure + */ +static void idpf_ptp_enable_shtime(struct idpf_adapter *adapter) +{ + u32 shtime_enable, exec_cmd; + + /* Get offsets */ + shtime_enable = adapter->ptp->cmd.shtime_enable_mask; + exec_cmd = adapter->ptp->cmd.exec_cmd_mask; + + /* Set the shtime en and the sync field */ + writel(shtime_enable, adapter->ptp->dev_clk_regs.cmd_sync); + writel(exec_cmd | shtime_enable, adapter->ptp->dev_clk_regs.cmd_sync); +} + +/** + * idpf_ptp_read_src_clk_reg_direct - Read directly the main timer value + * @adapter: Driver specific private structure + * @sts: Optional parameter for holding a pair of system timestamps from + * the system clock. Will be ignored when NULL is given. + * + * Return: the device clock time. 
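/*
 * [Editor's note, not part of the patch] The direct read documented above
 * first latches the running clock into shadow registers via the shtime
 * command (idpf_ptp_enable_shtime()), so the two 32-bit L/H reads that
 * follow come from a frozen snapshot. For contrast, a device without such
 * a latch would need the classic hi/lo/hi retry to guard against rollover
 * between the two accesses (generic sketch, not idpf code):
 */
static u64 example_read_split_counter(void __iomem *lo_reg,
				      void __iomem *hi_reg)
{
	u32 hi, lo, hi2;

	do {
		hi = readl(hi_reg);
		lo = readl(lo_reg);
		hi2 = readl(hi_reg);
	} while (hi != hi2);

	return ((u64)hi << 32) | lo;
}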
+ */ +static u64 idpf_ptp_read_src_clk_reg_direct(struct idpf_adapter *adapter, + struct ptp_system_timestamp *sts) +{ + struct idpf_ptp *ptp = adapter->ptp; + u32 hi, lo; + + spin_lock(&ptp->read_dev_clk_lock); + + /* Read the system timestamp pre PHC read */ + ptp_read_system_prets(sts); + + idpf_ptp_enable_shtime(adapter); + + /* Read the system timestamp post PHC read */ + ptp_read_system_postts(sts); + + lo = readl(ptp->dev_clk_regs.dev_clk_ns_l); + hi = readl(ptp->dev_clk_regs.dev_clk_ns_h); + + spin_unlock(&ptp->read_dev_clk_lock); + + return ((u64)hi << 32) | lo; +} + +/** + * idpf_ptp_read_src_clk_reg_mailbox - Read the main timer value through mailbox + * @adapter: Driver specific private structure + * @sts: Optional parameter for holding a pair of system timestamps from + * the system clock. Will be ignored when NULL is given. + * @src_clk: Returned main timer value in nanoseconds unit + * + * Return: 0 on success, -errno otherwise. + */ +static int idpf_ptp_read_src_clk_reg_mailbox(struct idpf_adapter *adapter, + struct ptp_system_timestamp *sts, + u64 *src_clk) +{ + struct idpf_ptp_dev_timers clk_time; + int err; + + /* Read the system timestamp pre PHC read */ + ptp_read_system_prets(sts); + + err = idpf_ptp_get_dev_clk_time(adapter, &clk_time); + if (err) + return err; + + /* Read the system timestamp post PHC read */ + ptp_read_system_postts(sts); + + *src_clk = clk_time.dev_clk_time_ns; + + return 0; +} + +/** + * idpf_ptp_read_src_clk_reg - Read the main timer value + * @adapter: Driver specific private structure + * @src_clk: Returned main timer value in nanoseconds unit + * @sts: Optional parameter for holding a pair of system timestamps from + * the system clock. Will be ignored if NULL is given. + * + * Return: the device clock time on success, -errno otherwise. + */ +static int idpf_ptp_read_src_clk_reg(struct idpf_adapter *adapter, u64 *src_clk, + struct ptp_system_timestamp *sts) +{ + switch (adapter->ptp->get_dev_clk_time_access) { + case IDPF_PTP_NONE: + return -EOPNOTSUPP; + case IDPF_PTP_MAILBOX: + return idpf_ptp_read_src_clk_reg_mailbox(adapter, sts, src_clk); + case IDPF_PTP_DIRECT: + *src_clk = idpf_ptp_read_src_clk_reg_direct(adapter, sts); + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +/** + * idpf_ptp_gettimex64 - Get the time of the clock + * @info: the driver's PTP info structure + * @ts: timespec64 structure to hold the current time value + * @sts: Optional parameter for holding a pair of system timestamps from + * the system clock. Will be ignored if NULL is given. + * + * Return: the device clock value in ns, after converting it into a timespec + * struct on success, -errno otherwise. + */ +static int idpf_ptp_gettimex64(struct ptp_clock_info *info, + struct timespec64 *ts, + struct ptp_system_timestamp *sts) +{ + struct idpf_adapter *adapter = idpf_ptp_info_to_adapter(info); + u64 time_ns; + int err; + + err = idpf_ptp_read_src_clk_reg(adapter, &time_ns, sts); + if (err) + return -EACCES; + + *ts = ns_to_timespec64(time_ns); + + return 0; +} + +/** + * idpf_ptp_update_phctime_rxq_grp - Update the cached PHC time for a given Rx + * queue group. 
+ * @grp: receive queue group in which Rx timestamp is enabled + * @split: Indicates whether the queue model is split or single queue + * @systime: Cached system time + */ +static void +idpf_ptp_update_phctime_rxq_grp(const struct idpf_rxq_group *grp, bool split, + u64 systime) +{ + struct idpf_rx_queue *rxq; + u16 i; + + if (!split) { + for (i = 0; i < grp->singleq.num_rxq; i++) { + rxq = grp->singleq.rxqs[i]; + if (rxq) + WRITE_ONCE(rxq->cached_phc_time, systime); + } + } else { + for (i = 0; i < grp->splitq.num_rxq_sets; i++) { + rxq = &grp->splitq.rxq_sets[i]->rxq; + if (rxq) + WRITE_ONCE(rxq->cached_phc_time, systime); + } + } +} + +/** + * idpf_ptp_update_cached_phctime - Update the cached PHC time values + * @adapter: Driver specific private structure + * + * This function updates the system time values which are cached in the adapter + * structure and the Rx queues. + * + * This function must be called periodically to ensure that the cached value + * is never more than 2 seconds old. + * + * Return: 0 on success, -errno otherwise. + */ +static int idpf_ptp_update_cached_phctime(struct idpf_adapter *adapter) +{ + u64 systime; + int err; + + err = idpf_ptp_read_src_clk_reg(adapter, &systime, NULL); + if (err) + return -EACCES; + + /* Update the cached PHC time stored in the adapter structure. + * These values are used to extend Tx timestamp values to 64 bit + * expected by the stack. + */ + WRITE_ONCE(adapter->ptp->cached_phc_time, systime); + WRITE_ONCE(adapter->ptp->cached_phc_jiffies, jiffies); + + idpf_for_each_vport(adapter, vport) { + bool split; + + if (!vport || !vport->rxq_grps) + continue; + + split = idpf_is_queue_model_split(vport->rxq_model); + + for (u16 i = 0; i < vport->num_rxq_grp; i++) { + struct idpf_rxq_group *grp = &vport->rxq_grps[i]; + + idpf_ptp_update_phctime_rxq_grp(grp, split, systime); + } + } + + return 0; +} + +/** + * idpf_ptp_settime64 - Set the time of the clock + * @info: the driver's PTP info structure + * @ts: timespec64 structure that holds the new time value + * + * Set the device clock to the user input value. The conversion from timespec + * to ns happens in the write function. + * + * Return: 0 on success, -errno otherwise. + */ +static int idpf_ptp_settime64(struct ptp_clock_info *info, + const struct timespec64 *ts) +{ + struct idpf_adapter *adapter = idpf_ptp_info_to_adapter(info); + enum idpf_ptp_access access; + int err; + u64 ns; + + access = adapter->ptp->set_dev_clk_time_access; + if (access != IDPF_PTP_MAILBOX) + return -EOPNOTSUPP; + + ns = timespec64_to_ns(ts); + + err = idpf_ptp_set_dev_clk_time(adapter, ns); + if (err) { + pci_err(adapter->pdev, "Failed to set the time, err: %pe\n", + ERR_PTR(err)); + return err; + } + + err = idpf_ptp_update_cached_phctime(adapter); + if (err) + pci_warn(adapter->pdev, + "Unable to immediately update cached PHC time\n"); + + return 0; +} + +/** + * idpf_ptp_adjtime_nonatomic - Do a non-atomic clock adjustment + * @info: the driver's PTP info structure + * @delta: Offset in nanoseconds to adjust the time by + * + * Return: 0 on success, -errno otherwise. 
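/*
 * [Editor's note, not part of the patch] The non-atomic fallback documented
 * above exists because, as the adjtime callback below states, the hardware
 * applies atomic offsets only from a signed 32-bit value, i.e. roughly
 * +/- 2.147 seconds (S32_MAX nanoseconds). A trivial sketch of that check:
 */
static bool example_needs_nonatomic_adjtime(s64 delta_ns)
{
	return delta_ns > S32_MAX || delta_ns < S32_MIN;
}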
+ */ +static int idpf_ptp_adjtime_nonatomic(struct ptp_clock_info *info, s64 delta) +{ + struct timespec64 now, then; + int err; + + err = idpf_ptp_gettimex64(info, &now, NULL); + if (err) + return err; + + then = ns_to_timespec64(delta); + now = timespec64_add(now, then); + + return idpf_ptp_settime64(info, &now); +} + +/** + * idpf_ptp_adjtime - Adjust the time of the clock by the indicated delta + * @info: the driver's PTP info structure + * @delta: Offset in nanoseconds to adjust the time by + * + * Return: 0 on success, -errno otherwise. + */ +static int idpf_ptp_adjtime(struct ptp_clock_info *info, s64 delta) +{ + struct idpf_adapter *adapter = idpf_ptp_info_to_adapter(info); + enum idpf_ptp_access access; + int err; + + access = adapter->ptp->adj_dev_clk_time_access; + if (access != IDPF_PTP_MAILBOX) + return -EOPNOTSUPP; + + /* Hardware only supports atomic adjustments using signed 32-bit + * integers. For any adjustment outside this range, perform + * a non-atomic get->adjust->set flow. + */ + if (delta > S32_MAX || delta < S32_MIN) + return idpf_ptp_adjtime_nonatomic(info, delta); + + err = idpf_ptp_adj_dev_clk_time(adapter, delta); + if (err) { + pci_err(adapter->pdev, "Failed to adjust the clock with delta %lld err: %pe\n", + delta, ERR_PTR(err)); + return err; + } + + err = idpf_ptp_update_cached_phctime(adapter); + if (err) + pci_warn(adapter->pdev, + "Unable to immediately update cached PHC time\n"); + + return 0; +} + +/** + * idpf_ptp_adjfine - Adjust clock increment rate + * @info: the driver's PTP info structure + * @scaled_ppm: Parts per million with 16-bit fractional field + * + * Adjust the frequency of the clock by the indicated scaled ppm from the + * base frequency. + * + * Return: 0 on success, -errno otherwise. + */ +static int idpf_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm) +{ + struct idpf_adapter *adapter = idpf_ptp_info_to_adapter(info); + enum idpf_ptp_access access; + u64 incval, diff; + int err; + + access = adapter->ptp->adj_dev_clk_time_access; + if (access != IDPF_PTP_MAILBOX) + return -EOPNOTSUPP; + + incval = adapter->ptp->base_incval; + + diff = adjust_by_scaled_ppm(incval, scaled_ppm); + err = idpf_ptp_adj_dev_clk_fine(adapter, diff); + if (err) + pci_err(adapter->pdev, "Failed to adjust clock increment rate for scaled ppm %ld %pe\n", + scaled_ppm, ERR_PTR(err)); + + return 0; +} + +/** + * idpf_ptp_verify_pin - Verify if pin supports requested pin function + * @info: the driver's PTP info structure + * @pin: Pin index + * @func: Assigned function + * @chan: Assigned channel + * + * Return: EOPNOTSUPP as not supported yet. + */ +static int idpf_ptp_verify_pin(struct ptp_clock_info *info, unsigned int pin, + enum ptp_pin_function func, unsigned int chan) +{ + return -EOPNOTSUPP; +} + +/** + * idpf_ptp_gpio_enable - Enable/disable ancillary features of PHC + * @info: the driver's PTP info structure + * @rq: The requested feature to change + * @on: Enable/disable flag + * + * Return: EOPNOTSUPP as not supported yet. + */ +static int idpf_ptp_gpio_enable(struct ptp_clock_info *info, + struct ptp_clock_request *rq, int on) +{ + return -EOPNOTSUPP; +} + +/** + * idpf_ptp_tstamp_extend_32b_to_64b - Convert a 32b nanoseconds Tx or Rx + * timestamp value to 64b. + * @cached_phc_time: recently cached copy of PHC time + * @in_timestamp: Ingress/egress 32b nanoseconds timestamp value + * + * Hardware captures timestamps which contain only 32 bits of nominal + * nanoseconds, as opposed to the 64bit timestamps that the stack expects. 
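/*
 * [Editor's worked example, not part of the patch] Usage sketch for the
 * extension helper defined below, with values chosen to show the wrap
 * handling:
 */
static u64 example_extend_wrap_case(void)
{
	u64 cached_phc_time = 0x00000005FFFFFFF0ULL;
	u32 hw_tstamp = 0x00000010;

	/* Low 32 bits of the cached PHC time are 0xFFFFFFF0, so
	 * delta = 0x00000010 - 0xFFFFFFF0 = 0x20 (mod 2^32), which is below
	 * U32_MAX / 2: the event is taken to lie just after the cached time
	 * and the extended result is 0x0000000600000010.
	 */
	return idpf_ptp_tstamp_extend_32b_to_64b(cached_phc_time, hw_tstamp);
}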
+ * + * Return: Tx timestamp value extended to 64 bits based on cached PHC time. + */ +u64 idpf_ptp_tstamp_extend_32b_to_64b(u64 cached_phc_time, u32 in_timestamp) +{ + u32 delta, phc_time_lo; + u64 ns; + + /* Extract the lower 32 bits of the PHC time */ + phc_time_lo = (u32)cached_phc_time; + + /* Calculate the delta between the lower 32bits of the cached PHC + * time and the in_timestamp value. + */ + delta = in_timestamp - phc_time_lo; + + if (delta > U32_MAX / 2) { + /* Reverse the delta calculation here */ + delta = phc_time_lo - in_timestamp; + ns = cached_phc_time - delta; + } else { + ns = cached_phc_time + delta; + } + + return ns; +} + +/** + * idpf_ptp_extend_ts - Convert a 40b timestamp to 64b nanoseconds + * @vport: Virtual port structure + * @in_tstamp: Ingress/egress timestamp value + * + * It is assumed that the caller verifies the timestamp is valid prior to + * calling this function. + * + * Extract the 32bit nominal nanoseconds and extend them. Use the cached PHC + * time stored in the device private PTP structure as the basis for timestamp + * extension. + * + * Return: Tx timestamp value extended to 64 bits. + */ +u64 idpf_ptp_extend_ts(struct idpf_vport *vport, u64 in_tstamp) +{ + struct idpf_ptp *ptp = vport->adapter->ptp; + unsigned long discard_time; + + discard_time = ptp->cached_phc_jiffies + 2 * HZ; + + if (time_is_before_jiffies(discard_time)) + return 0; + + return idpf_ptp_tstamp_extend_32b_to_64b(ptp->cached_phc_time, + lower_32_bits(in_tstamp)); +} + +/** + * idpf_ptp_request_ts - Request an available Tx timestamp index + * @tx_q: Transmit queue on which the Tx timestamp is requested + * @skb: The SKB to associate with this timestamp request + * @idx: Index of the Tx timestamp latch + * + * Request tx timestamp index negotiated during PTP init that will be set into + * Tx descriptor. + * + * Return: 0 and the index that can be provided to Tx descriptor on success, + * -errno otherwise. 
+ */ +int idpf_ptp_request_ts(struct idpf_tx_queue *tx_q, struct sk_buff *skb, + u32 *idx) +{ + struct idpf_ptp_tx_tstamp *ptp_tx_tstamp; + struct list_head *head; + + /* Get the index from the free latches list */ + spin_lock(&tx_q->cached_tstamp_caps->latches_lock); + + head = &tx_q->cached_tstamp_caps->latches_free; + if (list_empty(head)) { + spin_unlock(&tx_q->cached_tstamp_caps->latches_lock); + return -ENOBUFS; + } + + ptp_tx_tstamp = list_first_entry(head, struct idpf_ptp_tx_tstamp, + list_member); + list_del(&ptp_tx_tstamp->list_member); + + ptp_tx_tstamp->skb = skb_get(skb); + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + + /* Move the element to the used latches list */ + list_add(&ptp_tx_tstamp->list_member, + &tx_q->cached_tstamp_caps->latches_in_use); + spin_unlock(&tx_q->cached_tstamp_caps->latches_lock); + + *idx = ptp_tx_tstamp->idx; + + return 0; +} + +/** + * idpf_ptp_set_rx_tstamp - Enable or disable Rx timestamping + * @vport: Virtual port structure + * @rx_filter: Receive timestamp filter + */ +static void idpf_ptp_set_rx_tstamp(struct idpf_vport *vport, int rx_filter) +{ + bool enable = true, splitq; + + splitq = idpf_is_queue_model_split(vport->rxq_model); + + if (rx_filter == HWTSTAMP_FILTER_NONE) { + enable = false; + vport->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; + } else { + vport->tstamp_config.rx_filter = HWTSTAMP_FILTER_ALL; + } + + for (u16 i = 0; i < vport->num_rxq_grp; i++) { + struct idpf_rxq_group *grp = &vport->rxq_grps[i]; + struct idpf_rx_queue *rx_queue; + u16 j, num_rxq; + + if (splitq) + num_rxq = grp->splitq.num_rxq_sets; + else + num_rxq = grp->singleq.num_rxq; + + for (j = 0; j < num_rxq; j++) { + if (splitq) + rx_queue = &grp->splitq.rxq_sets[j]->rxq; + else + rx_queue = grp->singleq.rxqs[j]; + + if (enable) + idpf_queue_set(PTP, rx_queue); + else + idpf_queue_clear(PTP, rx_queue); + } + } +} + +/** + * idpf_ptp_set_timestamp_mode - Setup driver for requested timestamp mode + * @vport: Virtual port structure + * @config: Hwtstamp settings requested or saved + * + * Return: 0 on success, -errno otherwise. + */ +int idpf_ptp_set_timestamp_mode(struct idpf_vport *vport, + struct kernel_hwtstamp_config *config) +{ + switch (config->tx_type) { + case HWTSTAMP_TX_OFF: + break; + case HWTSTAMP_TX_ON: + if (!idpf_ptp_is_vport_tx_tstamp_ena(vport)) + return -EINVAL; + break; + default: + return -EINVAL; + } + + vport->tstamp_config.tx_type = config->tx_type; + idpf_ptp_set_rx_tstamp(vport, config->rx_filter); + *config = vport->tstamp_config; + + return 0; +} + +/** + * idpf_tstamp_task - Delayed task to handle Tx tstamps + * @work: work_struct handle + */ +void idpf_tstamp_task(struct work_struct *work) +{ + struct idpf_vport *vport; + + vport = container_of(work, struct idpf_vport, tstamp_task); + + idpf_ptp_get_tx_tstamp(vport); +} + +/** + * idpf_ptp_do_aux_work - Do PTP periodic work + * @info: Driver's PTP info structure + * + * Return: Number of jiffies to periodic work. + */ +static long idpf_ptp_do_aux_work(struct ptp_clock_info *info) +{ + struct idpf_adapter *adapter = idpf_ptp_info_to_adapter(info); + + idpf_ptp_update_cached_phctime(adapter); + + return msecs_to_jiffies(500); +} + +/** + * idpf_ptp_set_caps - Set PTP capabilities + * @adapter: Driver specific private structure + * + * This function sets the PTP functions. 
+ */ +static void idpf_ptp_set_caps(const struct idpf_adapter *adapter) +{ + struct ptp_clock_info *info = &adapter->ptp->info; + + snprintf(info->name, sizeof(info->name), "%s-%s-clk", + KBUILD_MODNAME, pci_name(adapter->pdev)); + + info->owner = THIS_MODULE; + info->max_adj = adapter->ptp->max_adj; + info->gettimex64 = idpf_ptp_gettimex64; + info->settime64 = idpf_ptp_settime64; + info->adjfine = idpf_ptp_adjfine; + info->adjtime = idpf_ptp_adjtime; + info->verify = idpf_ptp_verify_pin; + info->enable = idpf_ptp_gpio_enable; + info->do_aux_work = idpf_ptp_do_aux_work; +} + +/** + * idpf_ptp_create_clock - Create PTP clock device for userspace + * @adapter: Driver specific private structure + * + * This function creates a new PTP clock device. + * + * Return: 0 on success, -errno otherwise. + */ +static int idpf_ptp_create_clock(const struct idpf_adapter *adapter) +{ + struct ptp_clock *clock; + + idpf_ptp_set_caps(adapter); + + /* Attempt to register the clock before enabling the hardware. */ + clock = ptp_clock_register(&adapter->ptp->info, + &adapter->pdev->dev); + if (IS_ERR(clock)) { + pci_err(adapter->pdev, "PTP clock creation failed: %pe\n", + clock); + return PTR_ERR(clock); + } + + adapter->ptp->clock = clock; + + return 0; +} + +/** + * idpf_ptp_release_vport_tstamp - Release the Tx timestamp trackers for a + * given vport. + * @vport: Virtual port structure + * + * Remove the queues and delete the lists that track Tx timestamp entries for a + * given vport. + */ +static void idpf_ptp_release_vport_tstamp(struct idpf_vport *vport) +{ + struct idpf_ptp_tx_tstamp *ptp_tx_tstamp, *tmp; + struct list_head *head; + + cancel_work_sync(&vport->tstamp_task); + + /* Remove list with free latches */ + spin_lock_bh(&vport->tx_tstamp_caps->latches_lock); + + head = &vport->tx_tstamp_caps->latches_free; + list_for_each_entry_safe(ptp_tx_tstamp, tmp, head, list_member) { + list_del(&ptp_tx_tstamp->list_member); + kfree(ptp_tx_tstamp); + } + + /* Remove list with latches in use */ + head = &vport->tx_tstamp_caps->latches_in_use; + list_for_each_entry_safe(ptp_tx_tstamp, tmp, head, list_member) { + list_del(&ptp_tx_tstamp->list_member); + kfree(ptp_tx_tstamp); + } + + spin_unlock_bh(&vport->tx_tstamp_caps->latches_lock); + + kfree(vport->tx_tstamp_caps); + vport->tx_tstamp_caps = NULL; +} + +/** + * idpf_ptp_release_tstamp - Release the Tx timestamp trackers + * @adapter: Driver specific private structure + * + * Remove the queues and delete the lists that track Tx timestamp entries. + */ +static void idpf_ptp_release_tstamp(struct idpf_adapter *adapter) +{ + idpf_for_each_vport(adapter, vport) { + if (!idpf_ptp_is_vport_tx_tstamp_ena(vport)) + continue; + + idpf_ptp_release_vport_tstamp(vport); + } +} + +/** + * idpf_ptp_get_txq_tstamp_capability - Verify the timestamping capability + * for a given Tx queue. + * @txq: Transmit queue + * + * Since performing timestamp flows requires reading the device clock value and + * support in the Control Plane, the function checks both factors and + * summarizes the support for timestamping. + * + * Return: true if the timestamping is supported, false otherwise. 
+ */ +bool idpf_ptp_get_txq_tstamp_capability(struct idpf_tx_queue *txq) +{ + if (!txq || !txq->cached_tstamp_caps) + return false; + else if (txq->cached_tstamp_caps->access) + return true; + else + return false; +} + +/** + * idpf_ptp_init - Initialize PTP hardware clock support + * @adapter: Driver specific private structure + * + * Set up the device for interacting with the PTP hardware clock for all + * functions. Function will allocate and register a ptp_clock with the + * PTP_1588_CLOCK infrastructure. + * + * Return: 0 on success, -errno otherwise. + */ +int idpf_ptp_init(struct idpf_adapter *adapter) +{ + struct timespec64 ts; + int err; + + if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_PTP)) { + pci_dbg(adapter->pdev, "PTP capability is not detected\n"); + return -EOPNOTSUPP; + } + + adapter->ptp = kzalloc(sizeof(*adapter->ptp), GFP_KERNEL); + if (!adapter->ptp) + return -ENOMEM; + + /* add a back pointer to adapter */ + adapter->ptp->adapter = adapter; + + if (adapter->dev_ops.reg_ops.ptp_reg_init) + adapter->dev_ops.reg_ops.ptp_reg_init(adapter); + + err = idpf_ptp_get_caps(adapter); + if (err) { + pci_err(adapter->pdev, "Failed to get PTP caps err %d\n", err); + goto free_ptp; + } + + err = idpf_ptp_create_clock(adapter); + if (err) + goto free_ptp; + + if (adapter->ptp->get_dev_clk_time_access != IDPF_PTP_NONE) + ptp_schedule_worker(adapter->ptp->clock, 0); + + /* Write the default increment time value if the clock adjustments + * are enabled. + */ + if (adapter->ptp->adj_dev_clk_time_access != IDPF_PTP_NONE) { + err = idpf_ptp_adj_dev_clk_fine(adapter, + adapter->ptp->base_incval); + if (err) + goto remove_clock; + } + + /* Write the initial time value if the set time operation is enabled */ + if (adapter->ptp->set_dev_clk_time_access != IDPF_PTP_NONE) { + ts = ktime_to_timespec64(ktime_get_real()); + err = idpf_ptp_settime64(&adapter->ptp->info, &ts); + if (err) + goto remove_clock; + } + + spin_lock_init(&adapter->ptp->read_dev_clk_lock); + + pci_dbg(adapter->pdev, "PTP init successful\n"); + + return 0; + +remove_clock: + if (adapter->ptp->get_dev_clk_time_access != IDPF_PTP_NONE) + ptp_cancel_worker_sync(adapter->ptp->clock); + + ptp_clock_unregister(adapter->ptp->clock); + adapter->ptp->clock = NULL; + +free_ptp: + kfree(adapter->ptp); + adapter->ptp = NULL; + + return err; +} + +/** + * idpf_ptp_release - Clear PTP hardware clock support + * @adapter: Driver specific private structure + */ +void idpf_ptp_release(struct idpf_adapter *adapter) +{ + struct idpf_ptp *ptp = adapter->ptp; + + if (!ptp) + return; + + if (ptp->tx_tstamp_access != IDPF_PTP_NONE && + ptp->get_dev_clk_time_access != IDPF_PTP_NONE) + idpf_ptp_release_tstamp(adapter); + + if (ptp->clock) { + if (adapter->ptp->get_dev_clk_time_access != IDPF_PTP_NONE) + ptp_cancel_worker_sync(adapter->ptp->clock); + + ptp_clock_unregister(ptp->clock); + } + + kfree(ptp); + adapter->ptp = NULL; +} diff --git a/drivers/net/ethernet/intel/idpf/idpf_ptp.h b/drivers/net/ethernet/intel/idpf/idpf_ptp.h new file mode 100644 index 000000000000..a876749d6116 --- /dev/null +++ b/drivers/net/ethernet/intel/idpf/idpf_ptp.h @@ -0,0 +1,362 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2024 Intel Corporation */ + +#ifndef _IDPF_PTP_H +#define _IDPF_PTP_H + +#include <linux/ptp_clock_kernel.h> + +/** + * struct idpf_ptp_cmd - PTP command masks + * @exec_cmd_mask: mask to trigger command execution + * @shtime_enable_mask: mask to enable shadow time + */ +struct idpf_ptp_cmd { + u32 exec_cmd_mask; + 
u32 shtime_enable_mask; +}; + +/** + * struct idpf_ptp_dev_clk_regs - PTP device registers + * @dev_clk_ns_l: low part of the device clock register + * @dev_clk_ns_h: high part of the device clock register + * @phy_clk_ns_l: low part of the PHY clock register + * @phy_clk_ns_h: high part of the PHY clock register + * @incval_l: low part of the increment value register + * @incval_h: high part of the increment value register + * @shadj_l: low part of the shadow adjust register + * @shadj_h: high part of the shadow adjust register + * @phy_incval_l: low part of the PHY increment value register + * @phy_incval_h: high part of the PHY increment value register + * @phy_shadj_l: low part of the PHY shadow adjust register + * @phy_shadj_h: high part of the PHY shadow adjust register + * @cmd: PTP command register + * @phy_cmd: PHY command register + * @cmd_sync: PTP command synchronization register + */ +struct idpf_ptp_dev_clk_regs { + /* Main clock */ + void __iomem *dev_clk_ns_l; + void __iomem *dev_clk_ns_h; + + /* PHY timer */ + void __iomem *phy_clk_ns_l; + void __iomem *phy_clk_ns_h; + + /* Main timer adjustments */ + void __iomem *incval_l; + void __iomem *incval_h; + void __iomem *shadj_l; + void __iomem *shadj_h; + + /* PHY timer adjustments */ + void __iomem *phy_incval_l; + void __iomem *phy_incval_h; + void __iomem *phy_shadj_l; + void __iomem *phy_shadj_h; + + /* Command */ + void __iomem *cmd; + void __iomem *phy_cmd; + void __iomem *cmd_sync; +}; + +/** + * enum idpf_ptp_access - the type of access to PTP operations + * @IDPF_PTP_NONE: no access + * @IDPF_PTP_DIRECT: direct access through BAR registers + * @IDPF_PTP_MAILBOX: access through mailbox messages + */ +enum idpf_ptp_access { + IDPF_PTP_NONE = 0, + IDPF_PTP_DIRECT, + IDPF_PTP_MAILBOX, +}; + +/** + * struct idpf_ptp_secondary_mbx - PTP secondary mailbox + * @peer_mbx_q_id: PTP mailbox queue ID + * @peer_id: Peer ID for PTP Device Control daemon + * @valid: indicates whether secondary mailbox is supported by the Control + * Plane + */ +struct idpf_ptp_secondary_mbx { + u16 peer_mbx_q_id; + u16 peer_id; + bool valid:1; +}; + +/** + * enum idpf_ptp_tx_tstamp_state - Tx timestamp states + * @IDPF_PTP_FREE: Tx timestamp index free to use + * @IDPF_PTP_REQUEST: Tx timestamp index set to the Tx descriptor + * @IDPF_PTP_READ_VALUE: Tx timestamp value ready to be read + */ +enum idpf_ptp_tx_tstamp_state { + IDPF_PTP_FREE, + IDPF_PTP_REQUEST, + IDPF_PTP_READ_VALUE, +}; + +/** + * struct idpf_ptp_tx_tstamp_status - Parameters to track Tx timestamp + * @skb: the pointer to the SKB that received the completion tag + * @state: the state of the Tx timestamp + */ +struct idpf_ptp_tx_tstamp_status { + struct sk_buff *skb; + enum idpf_ptp_tx_tstamp_state state; +}; + +/** + * struct idpf_ptp_tx_tstamp - Parameters for Tx timestamping + * @list_member: the list member structure + * @tx_latch_reg_offset_l: Tx tstamp latch low register offset + * @tx_latch_reg_offset_h: Tx tstamp latch high register offset + * @skb: the pointer to the SKB for this timestamp request + * @tstamp: the Tx tstamp value + * @idx: the index of the Tx tstamp + */ +struct idpf_ptp_tx_tstamp { + struct list_head list_member; + u32 tx_latch_reg_offset_l; + u32 tx_latch_reg_offset_h; + struct sk_buff *skb; + u64 tstamp; + u32 idx; +}; + +/** + * struct idpf_ptp_vport_tx_tstamp_caps - Tx timestamp capabilities + * @vport_id: the vport id + * @num_entries: the number of negotiated Tx timestamp entries + * @tstamp_ns_lo_bit: first bit for nanosecond part of the timestamp + * 
@latches_lock: the lock to the lists of free/used timestamp indexes + * @status_lock: the lock to the status tracker + * @access: indicates an access to Tx timestamp + * @latches_free: the list of the free Tx timestamps latches + * @latches_in_use: the list of the used Tx timestamps latches + * @tx_tstamp_status: Tx tstamp status tracker + */ +struct idpf_ptp_vport_tx_tstamp_caps { + u32 vport_id; + u16 num_entries; + u16 tstamp_ns_lo_bit; + spinlock_t latches_lock; + spinlock_t status_lock; + bool access:1; + struct list_head latches_free; + struct list_head latches_in_use; + struct idpf_ptp_tx_tstamp_status tx_tstamp_status[]; +}; + +/** + * struct idpf_ptp - PTP parameters + * @info: structure defining PTP hardware capabilities + * @clock: pointer to registered PTP clock device + * @adapter: back pointer to the adapter + * @base_incval: base increment value of the PTP clock + * @max_adj: maximum adjustment of the PTP clock + * @cmd: HW specific command masks + * @cached_phc_time: a cached copy of the PHC time for timestamp extension + * @cached_phc_jiffies: jiffies when cached_phc_time was last updated + * @dev_clk_regs: the set of registers to access the device clock + * @caps: PTP capabilities negotiated with the Control Plane + * @get_dev_clk_time_access: access type for getting the device clock time + * @set_dev_clk_time_access: access type for setting the device clock time + * @adj_dev_clk_time_access: access type for the adjusting the device clock + * @tx_tstamp_access: access type for the Tx timestamp value read + * @rsv: reserved bits + * @secondary_mbx: parameters for using dedicated PTP mailbox + * @read_dev_clk_lock: spinlock protecting access to the device clock read + * operation executed by the HW latch + */ +struct idpf_ptp { + struct ptp_clock_info info; + struct ptp_clock *clock; + struct idpf_adapter *adapter; + u64 base_incval; + u64 max_adj; + struct idpf_ptp_cmd cmd; + u64 cached_phc_time; + unsigned long cached_phc_jiffies; + struct idpf_ptp_dev_clk_regs dev_clk_regs; + u32 caps; + enum idpf_ptp_access get_dev_clk_time_access:2; + enum idpf_ptp_access set_dev_clk_time_access:2; + enum idpf_ptp_access adj_dev_clk_time_access:2; + enum idpf_ptp_access tx_tstamp_access:2; + u8 rsv; + struct idpf_ptp_secondary_mbx secondary_mbx; + spinlock_t read_dev_clk_lock; +}; + +/** + * idpf_ptp_info_to_adapter - get driver adapter struct from ptp_clock_info + * @info: pointer to ptp_clock_info struct + * + * Return: pointer to the corresponding adapter struct + */ +static inline struct idpf_adapter * +idpf_ptp_info_to_adapter(const struct ptp_clock_info *info) +{ + const struct idpf_ptp *ptp = container_of_const(info, struct idpf_ptp, + info); + return ptp->adapter; +} + +/** + * struct idpf_ptp_dev_timers - System time and device time values + * @sys_time_ns: system time value expressed in nanoseconds + * @dev_clk_time_ns: device clock time value expressed in nanoseconds + */ +struct idpf_ptp_dev_timers { + u64 sys_time_ns; + u64 dev_clk_time_ns; +}; + +/** + * idpf_ptp_is_vport_tx_tstamp_ena - Verify the Tx timestamping enablement for + * a given vport. + * @vport: Virtual port structure + * + * Tx timestamp capabilities are negotiated with the Control Plane only if the + * device clock value can be read, Tx timestamp access type is different than + * NONE, and the PTP clock for the adapter is created. When all those conditions + * are satisfied, Tx timestamp feature is enabled and tx_tstamp_caps is + * allocated and fulfilled. 
+ * + * Return: true if the Tx timestamping is enabled, false otherwise. + */ +static inline bool idpf_ptp_is_vport_tx_tstamp_ena(struct idpf_vport *vport) +{ + if (!vport->tx_tstamp_caps) + return false; + else + return true; +} + +/** + * idpf_ptp_is_vport_rx_tstamp_ena - Verify the Rx timestamping enablement for + * a given vport. + * @vport: Virtual port structure + * + * Rx timestamp feature is enabled if the PTP clock for the adapter is created + * and it is possible to read the value of the device clock. The second + * assumption comes from the need to extend the Rx timestamp value to 64 bit + * based on the current device clock time. + * + * Return: true if the Rx timestamping is enabled, false otherwise. + */ +static inline bool idpf_ptp_is_vport_rx_tstamp_ena(struct idpf_vport *vport) +{ + if (!vport->adapter->ptp || + vport->adapter->ptp->get_dev_clk_time_access == IDPF_PTP_NONE) + return false; + else + return true; +} + +#if IS_ENABLED(CONFIG_PTP_1588_CLOCK) +int idpf_ptp_init(struct idpf_adapter *adapter); +void idpf_ptp_release(struct idpf_adapter *adapter); +int idpf_ptp_get_caps(struct idpf_adapter *adapter); +void idpf_ptp_get_features_access(const struct idpf_adapter *adapter); +bool idpf_ptp_get_txq_tstamp_capability(struct idpf_tx_queue *txq); +int idpf_ptp_get_dev_clk_time(struct idpf_adapter *adapter, + struct idpf_ptp_dev_timers *dev_clk_time); +int idpf_ptp_set_dev_clk_time(struct idpf_adapter *adapter, u64 time); +int idpf_ptp_adj_dev_clk_fine(struct idpf_adapter *adapter, u64 incval); +int idpf_ptp_adj_dev_clk_time(struct idpf_adapter *adapter, s64 delta); +int idpf_ptp_get_vport_tstamps_caps(struct idpf_vport *vport); +int idpf_ptp_get_tx_tstamp(struct idpf_vport *vport); +int idpf_ptp_set_timestamp_mode(struct idpf_vport *vport, + struct kernel_hwtstamp_config *config); +u64 idpf_ptp_extend_ts(struct idpf_vport *vport, u64 in_tstamp); +u64 idpf_ptp_tstamp_extend_32b_to_64b(u64 cached_phc_time, u32 in_timestamp); +int idpf_ptp_request_ts(struct idpf_tx_queue *tx_q, struct sk_buff *skb, + u32 *idx); +void idpf_tstamp_task(struct work_struct *work); +#else /* CONFIG_PTP_1588_CLOCK */ +static inline int idpf_ptp_init(struct idpf_adapter *adapter) +{ + return 0; +} + +static inline void idpf_ptp_release(struct idpf_adapter *adapter) { } + +static inline int idpf_ptp_get_caps(struct idpf_adapter *adapter) +{ + return -EOPNOTSUPP; +} + +static inline void +idpf_ptp_get_features_access(const struct idpf_adapter *adapter) { } + +static inline bool +idpf_ptp_get_txq_tstamp_capability(struct idpf_tx_queue *txq) +{ + return false; +} + +static inline int +idpf_ptp_get_dev_clk_time(struct idpf_adapter *adapter, + struct idpf_ptp_dev_timers *dev_clk_time) +{ + return -EOPNOTSUPP; +} + +static inline int idpf_ptp_set_dev_clk_time(struct idpf_adapter *adapter, + u64 time) +{ + return -EOPNOTSUPP; +} + +static inline int idpf_ptp_adj_dev_clk_fine(struct idpf_adapter *adapter, + u64 incval) +{ + return -EOPNOTSUPP; +} + +static inline int idpf_ptp_adj_dev_clk_time(struct idpf_adapter *adapter, + s64 delta) +{ + return -EOPNOTSUPP; +} + +static inline int idpf_ptp_get_vport_tstamps_caps(struct idpf_vport *vport) +{ + return -EOPNOTSUPP; +} + +static inline int idpf_ptp_get_tx_tstamp(struct idpf_vport *vport) +{ + return -EOPNOTSUPP; +} + +static inline int +idpf_ptp_set_timestamp_mode(struct idpf_vport *vport, + struct kernel_hwtstamp_config *config) +{ + return -EOPNOTSUPP; +} + +static inline u64 idpf_ptp_extend_ts(struct idpf_vport *vport, u32 in_tstamp) +{ + return 0; +} + 
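The !CONFIG_PTP_1588_CLOCK stubs in this block mirror the real helpers added in idpf_ptp.c earlier in the patch. In particular, idpf_ptp_tstamp_extend_32b_to_64b() extends a 32-bit hardware timestamp against a cached 64-bit PHC time that the periodic aux work keeps less than roughly two seconds stale. The wraparound handling is easiest to see with concrete numbers; below is a minimal standalone sketch of the same delta trick (plain userspace C with made-up values, helper name invented for illustration, not driver code):

	#include <stdint.h>
	#include <stdio.h>

	/* Same arithmetic as idpf_ptp_tstamp_extend_32b_to_64b(): when the 32-bit
	 * difference is "large", the captured timestamp is actually behind the
	 * cached PHC time, so subtract the reversed delta instead of adding.
	 */
	static uint64_t extend_32b_to_64b(uint64_t cached_phc_time, uint32_t in_ts)
	{
		uint32_t phc_lo = (uint32_t)cached_phc_time;
		uint32_t delta = in_ts - phc_lo;

		if (delta > UINT32_MAX / 2)
			return cached_phc_time - (phc_lo - in_ts);

		return cached_phc_time + delta;
	}

	int main(void)
	{
		/* cached PHC time just below a rollover of its low 32 bits */
		uint64_t phc = 0x00000002fffffff0ULL;

		/* timestamp captured shortly after that rollover */
		printf("0x%llx\n", (unsigned long long)extend_32b_to_64b(phc, 0x10));
		/* prints 0x300000010: the upper 32 bits advance as expected */
		return 0;
	}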
+static inline u64 idpf_ptp_tstamp_extend_32b_to_64b(u64 cached_phc_time, + u32 in_timestamp) +{ + return 0; +} + +static inline int idpf_ptp_request_ts(struct idpf_tx_queue *tx_q, + struct sk_buff *skb, u32 *idx) +{ + return -EOPNOTSUPP; +} + +static inline void idpf_tstamp_task(struct work_struct *work) { } +#endif /* CONFIG_PTP_1588_CLOCK */ +#endif /* _IDPF_PTP_H */ diff --git a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c index eae1b6f474e6..2e356dd10812 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c @@ -891,7 +891,6 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rx_q, * idpf_rx_singleq_extract_base_fields - Extract fields from the Rx descriptor * @rx_desc: the descriptor to process * @fields: storage for extracted values - * @ptype: pointer that will store packet type * * Decode the Rx descriptor and extract relevant information including the * size and Rx packet type. @@ -901,21 +900,20 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rx_q, */ static void idpf_rx_singleq_extract_base_fields(const union virtchnl2_rx_desc *rx_desc, - struct libeth_rqe_info *fields, u32 *ptype) + struct libeth_rqe_info *fields) { u64 qword; qword = le64_to_cpu(rx_desc->base_wb.qword1.status_error_ptype_len); fields->len = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_LEN_PBUF_M, qword); - *ptype = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_PTYPE_M, qword); + fields->ptype = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_PTYPE_M, qword); } /** * idpf_rx_singleq_extract_flex_fields - Extract fields from the Rx descriptor * @rx_desc: the descriptor to process * @fields: storage for extracted values - * @ptype: pointer that will store packet type * * Decode the Rx descriptor and extract relevant information including the * size and Rx packet type. 
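The idpf_singleq_txrx.c hunks around this point stop passing the packet type through a separate u32 *ptype out-parameter and instead carry it in the ptype member of struct libeth_rqe_info. A minimal sketch of the resulting calling shape (fragment only; the helpers and the fields.ptype access are taken from the hunks themselves, while the surrounding context is illustrative):

	struct libeth_rqe_info fields = { };

	/* ptype is now filled in as part of the fields structure... */
	idpf_rx_singleq_extract_fields(rx_q, rx_desc, &fields);

	/* ...and read back from it when populating the skb fields */
	idpf_rx_singleq_process_skb_fields(rx_q, skb, rx_desc, fields.ptype);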
@@ -925,12 +923,12 @@ idpf_rx_singleq_extract_base_fields(const union virtchnl2_rx_desc *rx_desc, */ static void idpf_rx_singleq_extract_flex_fields(const union virtchnl2_rx_desc *rx_desc, - struct libeth_rqe_info *fields, u32 *ptype) + struct libeth_rqe_info *fields) { fields->len = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_PKT_LEN_M, le16_to_cpu(rx_desc->flex_nic_wb.pkt_len)); - *ptype = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_PTYPE_M, - le16_to_cpu(rx_desc->flex_nic_wb.ptype_flex_flags0)); + fields->ptype = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_PTYPE_M, + le16_to_cpu(rx_desc->flex_nic_wb.ptype_flex_flags0)); } /** @@ -938,18 +936,17 @@ idpf_rx_singleq_extract_flex_fields(const union virtchnl2_rx_desc *rx_desc, * @rx_q: Rx descriptor queue * @rx_desc: the descriptor to process * @fields: storage for extracted values - * @ptype: pointer that will store packet type * */ static void idpf_rx_singleq_extract_fields(const struct idpf_rx_queue *rx_q, const union virtchnl2_rx_desc *rx_desc, - struct libeth_rqe_info *fields, u32 *ptype) + struct libeth_rqe_info *fields) { if (rx_q->rxdids == VIRTCHNL2_RXDID_1_32B_BASE_M) - idpf_rx_singleq_extract_base_fields(rx_desc, fields, ptype); + idpf_rx_singleq_extract_base_fields(rx_desc, fields); else - idpf_rx_singleq_extract_flex_fields(rx_desc, fields, ptype); + idpf_rx_singleq_extract_flex_fields(rx_desc, fields); } /** @@ -972,7 +969,6 @@ static int idpf_rx_singleq_clean(struct idpf_rx_queue *rx_q, int budget) struct libeth_rqe_info fields = { }; union virtchnl2_rx_desc *rx_desc; struct idpf_rx_buf *rx_buf; - u32 ptype; /* get the Rx desc from Rx queue based on 'next_to_clean' */ rx_desc = &rx_q->rx[ntc]; @@ -993,7 +989,7 @@ static int idpf_rx_singleq_clean(struct idpf_rx_queue *rx_q, int budget) */ dma_rmb(); - idpf_rx_singleq_extract_fields(rx_q, rx_desc, &fields, &ptype); + idpf_rx_singleq_extract_fields(rx_q, rx_desc, &fields); rx_buf = &rx_q->rx_buf[ntc]; if (!libeth_rx_sync_for_cpu(rx_buf, fields.len)) @@ -1037,7 +1033,8 @@ skip_data: total_rx_bytes += skb->len; /* protocol */ - idpf_rx_singleq_process_skb_fields(rx_q, skb, rx_desc, ptype); + idpf_rx_singleq_process_skb_fields(rx_q, skb, rx_desc, + fields.ptype); /* send completed skb up the stack */ napi_gro_receive(rx_q->pp->p.napi, skb); diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c index 2d5f5c9f91ce..631679cdaa6f 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c @@ -5,6 +5,7 @@ #include <net/libeth/tx.h> #include "idpf.h" +#include "idpf_ptp.h" #include "idpf_virtchnl.h" struct idpf_tx_stash { @@ -1107,6 +1108,8 @@ void idpf_vport_queues_rel(struct idpf_vport *vport) */ static int idpf_vport_init_fast_path_txqs(struct idpf_vport *vport) { + struct idpf_ptp_vport_tx_tstamp_caps *caps = vport->tx_tstamp_caps; + struct work_struct *tstamp_task = &vport->tstamp_task; int i, j, k = 0; vport->txqs = kcalloc(vport->num_txq, sizeof(*vport->txqs), @@ -1121,6 +1124,12 @@ static int idpf_vport_init_fast_path_txqs(struct idpf_vport *vport) for (j = 0; j < tx_grp->num_txq; j++, k++) { vport->txqs[k] = tx_grp->txqs[j]; vport->txqs[k]->idx = k; + + if (!caps) + continue; + + vport->txqs[k]->cached_tstamp_caps = caps; + vport->txqs[k]->tstamp_task = tstamp_task; } } @@ -1655,6 +1664,40 @@ static void idpf_tx_handle_sw_marker(struct idpf_tx_queue *tx_q) } /** + * idpf_tx_read_tstamp - schedule a work to read Tx timestamp value + * @txq: queue to read the timestamp from + * @skb: socket buffer to provide Tx 
timestamp value + * + * Schedule a work to read Tx timestamp value generated once the packet is + * transmitted. + */ +static void idpf_tx_read_tstamp(struct idpf_tx_queue *txq, struct sk_buff *skb) +{ + struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps; + struct idpf_ptp_tx_tstamp_status *tx_tstamp_status; + + tx_tstamp_caps = txq->cached_tstamp_caps; + spin_lock_bh(&tx_tstamp_caps->status_lock); + + for (u32 i = 0; i < tx_tstamp_caps->num_entries; i++) { + tx_tstamp_status = &tx_tstamp_caps->tx_tstamp_status[i]; + if (tx_tstamp_status->state != IDPF_PTP_FREE) + continue; + + tx_tstamp_status->skb = skb; + tx_tstamp_status->state = IDPF_PTP_REQUEST; + + /* Fetch timestamp from completion descriptor through + * virtchnl msg to report to stack. + */ + queue_work(system_unbound_wq, txq->tstamp_task); + break; + } + + spin_unlock_bh(&tx_tstamp_caps->status_lock); +} + +/** * idpf_tx_clean_stashed_bufs - clean bufs that were stored for * out of order completions * @txq: queue to clean @@ -1682,6 +1725,11 @@ static void idpf_tx_clean_stashed_bufs(struct idpf_tx_queue *txq, continue; hash_del(&stash->hlist); + + if (stash->buf.type == LIBETH_SQE_SKB && + (skb_shinfo(stash->buf.skb)->tx_flags & SKBTX_IN_PROGRESS)) + idpf_tx_read_tstamp(txq, stash->buf.skb); + libeth_tx_complete(&stash->buf, &cp); /* Push shadow buf back onto stack */ @@ -1876,8 +1924,12 @@ static bool idpf_tx_clean_buf_ring(struct idpf_tx_queue *txq, u16 compl_tag, idpf_tx_buf_compl_tag(tx_buf) != compl_tag)) return false; - if (tx_buf->type == LIBETH_SQE_SKB) + if (tx_buf->type == LIBETH_SQE_SKB) { + if (skb_shinfo(tx_buf->skb)->tx_flags & SKBTX_IN_PROGRESS) + idpf_tx_read_tstamp(txq, tx_buf->skb); + libeth_tx_complete(tx_buf, &cp); + } idpf_tx_clean_buf_ring_bump_ntc(txq, idx, tx_buf); @@ -2127,7 +2179,7 @@ void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc, struct idpf_tx_splitq_params *params, u16 td_cmd, u16 size) { - desc->flow.qw1.cmd_dtype = (u16)params->dtype | td_cmd; + *(u32 *)&desc->flow.qw1.cmd_dtype = (u8)(params->dtype | td_cmd); desc->flow.qw1.rxr_bufsize = cpu_to_le16((u16)size); desc->flow.qw1.compl_tag = cpu_to_le16(params->compl_tag); } @@ -2296,7 +2348,7 @@ void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb, * descriptor. Reset that here. */ tx_desc = &txq->flex_tx[idx]; - memset(tx_desc, 0, sizeof(struct idpf_flex_tx_ctx_desc)); + memset(tx_desc, 0, sizeof(*tx_desc)); if (idx == 0) idx = txq->desc_count; idx--; @@ -2699,10 +2751,10 @@ static bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs, * Since the TX buffer rings mimics the descriptor ring, update the tx buffer * ring entry to reflect that this index is a context descriptor */ -static struct idpf_flex_tx_ctx_desc * +static union idpf_flex_tx_ctx_desc * idpf_tx_splitq_get_ctx_desc(struct idpf_tx_queue *txq) { - struct idpf_flex_tx_ctx_desc *desc; + union idpf_flex_tx_ctx_desc *desc; int i = txq->next_to_use; txq->tx_buf[i].type = LIBETH_SQE_CTX; @@ -2732,6 +2784,73 @@ netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb) return NETDEV_TX_OK; } +#if (IS_ENABLED(CONFIG_PTP_1588_CLOCK)) +/** + * idpf_tx_tstamp - set up context descriptor for hardware timestamp + * @tx_q: queue to send buffer on + * @skb: pointer to the SKB we're sending + * @off: pointer to the offload struct + * + * Return: Positive index number on success, negative otherwise. 
+ */ +static int idpf_tx_tstamp(struct idpf_tx_queue *tx_q, struct sk_buff *skb, + struct idpf_tx_offload_params *off) +{ + int err, idx; + + /* only timestamp the outbound packet if the user has requested it */ + if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) + return -1; + + if (!idpf_ptp_get_txq_tstamp_capability(tx_q)) + return -1; + + /* Tx timestamps cannot be sampled when doing TSO */ + if (off->tx_flags & IDPF_TX_FLAGS_TSO) + return -1; + + /* Grab an open timestamp slot */ + err = idpf_ptp_request_ts(tx_q, skb, &idx); + if (err) { + u64_stats_update_begin(&tx_q->stats_sync); + u64_stats_inc(&tx_q->q_stats.tstamp_skipped); + u64_stats_update_end(&tx_q->stats_sync); + + return -1; + } + + off->tx_flags |= IDPF_TX_FLAGS_TSYN; + + return idx; +} + +/** + * idpf_tx_set_tstamp_desc - Set the Tx descriptor fields needed to generate + * PHY Tx timestamp + * @ctx_desc: Context descriptor + * @idx: Index of the Tx timestamp latch + */ +static void idpf_tx_set_tstamp_desc(union idpf_flex_tx_ctx_desc *ctx_desc, + u32 idx) +{ + ctx_desc->tsyn.qw1 = le64_encode_bits(IDPF_TX_DESC_DTYPE_CTX, + IDPF_TX_CTX_DTYPE_M) | + le64_encode_bits(IDPF_TX_CTX_DESC_TSYN, + IDPF_TX_CTX_CMD_M) | + le64_encode_bits(idx, IDPF_TX_CTX_TSYN_REG_M); +} +#else /* CONFIG_PTP_1588_CLOCK */ +static int idpf_tx_tstamp(struct idpf_tx_queue *tx_q, struct sk_buff *skb, + struct idpf_tx_offload_params *off) +{ + return -1; +} + +static void idpf_tx_set_tstamp_desc(union idpf_flex_tx_ctx_desc *ctx_desc, + u32 idx) +{ } +#endif /* CONFIG_PTP_1588_CLOCK */ + /** * idpf_tx_splitq_frame - Sends buffer on Tx ring using flex descriptors * @skb: send buffer @@ -2743,9 +2862,10 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb, struct idpf_tx_queue *tx_q) { struct idpf_tx_splitq_params tx_params = { }; + union idpf_flex_tx_ctx_desc *ctx_desc; struct idpf_tx_buf *first; unsigned int count; - int tso; + int tso, idx; count = idpf_tx_desc_count_required(tx_q, skb); if (unlikely(!count)) @@ -2765,8 +2885,7 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb, if (tso) { /* If tso is needed, set up context desc */ - struct idpf_flex_tx_ctx_desc *ctx_desc = - idpf_tx_splitq_get_ctx_desc(tx_q); + ctx_desc = idpf_tx_splitq_get_ctx_desc(tx_q); ctx_desc->tso.qw1.cmd_dtype = cpu_to_le16(IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX | @@ -2784,6 +2903,12 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb, u64_stats_update_end(&tx_q->stats_sync); } + idx = idpf_tx_tstamp(tx_q, skb, &tx_params.offload); + if (idx != -1) { + ctx_desc = idpf_tx_splitq_get_ctx_desc(tx_q); + idpf_tx_set_tstamp_desc(ctx_desc, idx); + } + /* record the location of the first descriptor for this packet */ first = &tx_q->tx_buf[tx_q->next_to_use]; first->skb = skb; @@ -3046,6 +3171,33 @@ static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb, } /** + * idpf_rx_hwtstamp - check for an RX timestamp and pass up the stack + * @rxq: pointer to the rx queue that receives the timestamp + * @rx_desc: pointer to rx descriptor containing timestamp + * @skb: skb to put timestamp in + */ +static void +idpf_rx_hwtstamp(const struct idpf_rx_queue *rxq, + const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc, + struct sk_buff *skb) +{ + u64 cached_time, ts_ns; + u32 ts_high; + + if (!(rx_desc->ts_low & VIRTCHNL2_RX_FLEX_TSTAMP_VALID)) + return; + + cached_time = READ_ONCE(rxq->cached_phc_time); + + ts_high = le32_to_cpu(rx_desc->ts_high); + ts_ns = idpf_ptp_tstamp_extend_32b_to_64b(cached_time, ts_high); + + *skb_hwtstamps(skb) = (struct 
skb_shared_hwtstamps) { + .hwtstamp = ns_to_ktime(ts_ns), + }; +} + +/** * idpf_rx_process_skb_fields - Populate skb header fields from Rx descriptor * @rxq: Rx descriptor ring packet is being transacted on * @skb: pointer to current skb being populated @@ -3070,6 +3222,9 @@ idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb, /* process RSS/hash */ idpf_rx_hash(rxq, skb, rx_desc, decoded); + if (idpf_queue_has(PTP, rxq)) + idpf_rx_hwtstamp(rxq, rx_desc, skb); + skb->protocol = eth_type_trans(skb, rxq->netdev); skb_record_rx_queue(skb, rxq->idx); diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h index b029f566e57c..c779fe71df99 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h @@ -142,6 +142,7 @@ do { \ #define IDPF_TX_FLAGS_IPV4 BIT(1) #define IDPF_TX_FLAGS_IPV6 BIT(2) #define IDPF_TX_FLAGS_TUNNEL BIT(3) +#define IDPF_TX_FLAGS_TSYN BIT(4) union idpf_tx_flex_desc { struct idpf_flex_tx_desc q; /* queue based scheduling */ @@ -289,6 +290,8 @@ struct idpf_ptype_state { * @__IDPF_Q_POLL_MODE: Enable poll mode * @__IDPF_Q_CRC_EN: enable CRC offload in singleq mode * @__IDPF_Q_HSPLIT_EN: enable header split on Rx (splitq) + * @__IDPF_Q_PTP: indicates whether the Rx timestamping is enabled for the + * queue * @__IDPF_Q_FLAGS_NBITS: Must be last */ enum idpf_queue_flags_t { @@ -299,6 +302,7 @@ enum idpf_queue_flags_t { __IDPF_Q_POLL_MODE, __IDPF_Q_CRC_EN, __IDPF_Q_HSPLIT_EN, + __IDPF_Q_PTP, __IDPF_Q_FLAGS_NBITS, }; @@ -443,6 +447,7 @@ struct idpf_tx_queue_stats { u64_stats_t q_busy; u64_stats_t skb_drops; u64_stats_t dma_map_errs; + u64_stats_t tstamp_skipped; }; #define IDPF_ITR_DYNAMIC 1 @@ -494,6 +499,7 @@ struct idpf_txq_stash { * @next_to_alloc: RX buffer to allocate at * @skb: Pointer to the skb * @truesize: data buffer truesize in singleq + * @cached_phc_time: Cached PHC time for the Rx queue * @stats_sync: See struct u64_stats_sync * @q_stats: See union idpf_rx_queue_stats * @q_id: Queue id @@ -541,6 +547,7 @@ struct idpf_rx_queue { struct sk_buff *skb; u32 truesize; + u64 cached_phc_time; struct u64_stats_sync stats_sync; struct idpf_rx_queue_stats q_stats; @@ -560,7 +567,7 @@ struct idpf_rx_queue { __cacheline_group_end_aligned(cold); }; libeth_cacheline_set_assert(struct idpf_rx_queue, 64, - 80 + sizeof(struct u64_stats_sync), + 88 + sizeof(struct u64_stats_sync), 32); /** @@ -617,6 +624,8 @@ libeth_cacheline_set_assert(struct idpf_rx_queue, 64, * @compl_tag_bufid_m: Completion tag buffer id mask * @compl_tag_cur_gen: Used to keep track of current completion tag generation * @compl_tag_gen_max: To determine when compl_tag_cur_gen should be reset + * @cached_tstamp_caps: Tx timestamp capabilities negotiated with the CP + * @tstamp_task: Work that handles Tx timestamp read * @stats_sync: See struct u64_stats_sync * @q_stats: See union idpf_tx_queue_stats * @q_id: Queue id @@ -630,7 +639,7 @@ struct idpf_tx_queue { struct idpf_base_tx_desc *base_tx; struct idpf_base_tx_ctx_desc *base_ctx; union idpf_tx_flex_desc *flex_tx; - struct idpf_flex_tx_ctx_desc *flex_ctx; + union idpf_flex_tx_ctx_desc *flex_ctx; void *desc_ring; }; @@ -666,6 +675,9 @@ struct idpf_tx_queue { u16 compl_tag_cur_gen; u16 compl_tag_gen_max; + struct idpf_ptp_vport_tx_tstamp_caps *cached_tstamp_caps; + struct work_struct *tstamp_task; + struct u64_stats_sync stats_sync; struct idpf_tx_queue_stats q_stats; __cacheline_group_end_aligned(read_write); @@ -679,7 +691,7 @@ struct idpf_tx_queue { 
__cacheline_group_end_aligned(cold); }; libeth_cacheline_set_assert(struct idpf_tx_queue, 64, - 88 + sizeof(struct u64_stats_sync), + 112 + sizeof(struct u64_stats_sync), 24); /** diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c index 3d2413b8684f..07a9f5ae34fd 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c +++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c @@ -5,88 +5,7 @@ #include "idpf.h" #include "idpf_virtchnl.h" - -#define IDPF_VC_XN_MIN_TIMEOUT_MSEC 2000 -#define IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC (60 * 1000) -#define IDPF_VC_XN_IDX_M GENMASK(7, 0) -#define IDPF_VC_XN_SALT_M GENMASK(15, 8) -#define IDPF_VC_XN_RING_LEN U8_MAX - -/** - * enum idpf_vc_xn_state - Virtchnl transaction status - * @IDPF_VC_XN_IDLE: not expecting a reply, ready to be used - * @IDPF_VC_XN_WAITING: expecting a reply, not yet received - * @IDPF_VC_XN_COMPLETED_SUCCESS: a reply was expected and received, - * buffer updated - * @IDPF_VC_XN_COMPLETED_FAILED: a reply was expected and received, but there - * was an error, buffer not updated - * @IDPF_VC_XN_SHUTDOWN: transaction object cannot be used, VC torn down - * @IDPF_VC_XN_ASYNC: transaction sent asynchronously and doesn't have the - * return context; a callback may be provided to handle - * return - */ -enum idpf_vc_xn_state { - IDPF_VC_XN_IDLE = 1, - IDPF_VC_XN_WAITING, - IDPF_VC_XN_COMPLETED_SUCCESS, - IDPF_VC_XN_COMPLETED_FAILED, - IDPF_VC_XN_SHUTDOWN, - IDPF_VC_XN_ASYNC, -}; - -struct idpf_vc_xn; -/* Callback for asynchronous messages */ -typedef int (*async_vc_cb) (struct idpf_adapter *, struct idpf_vc_xn *, - const struct idpf_ctlq_msg *); - -/** - * struct idpf_vc_xn - Data structure representing virtchnl transactions - * @completed: virtchnl event loop uses that to signal when a reply is - * available, uses kernel completion API - * @state: virtchnl event loop stores the data below, protected by the - * completion's lock. - * @reply_sz: Original size of reply, may be > reply_buf.iov_len; it will be - * truncated on its way to the receiver thread according to - * reply_buf.iov_len. - * @reply: Reference to the buffer(s) where the reply data should be written - * to. May be 0-length (then NULL address permitted) if the reply data - * should be ignored. - * @async_handler: if sent asynchronously, a callback can be provided to handle - * the reply when it's received - * @vc_op: corresponding opcode sent with this transaction - * @idx: index used as retrieval on reply receive, used for cookie - * @salt: changed every message to make unique, used for cookie - */ -struct idpf_vc_xn { - struct completion completed; - enum idpf_vc_xn_state state; - size_t reply_sz; - struct kvec reply; - async_vc_cb async_handler; - u32 vc_op; - u8 idx; - u8 salt; -}; - -/** - * struct idpf_vc_xn_params - Parameters for executing transaction - * @send_buf: kvec for send buffer - * @recv_buf: kvec for recv buffer, may be NULL, must then have zero length - * @timeout_ms: timeout to wait for reply - * @async: send message asynchronously, will not wait on completion - * @async_handler: If sent asynchronously, optional callback handler. The user - * must be careful when using async handlers as the memory for - * the recv_buf _cannot_ be on stack if this is async. 
- * @vc_op: virtchnl op to send - */ -struct idpf_vc_xn_params { - struct kvec send_buf; - struct kvec recv_buf; - int timeout_ms; - bool async; - async_vc_cb async_handler; - u32 vc_op; -}; +#include "idpf_ptp.h" /** * struct idpf_vc_xn_manager - Manager for tracking transactions @@ -235,6 +154,55 @@ err_kfree: return err; } +#if IS_ENABLED(CONFIG_PTP_1588_CLOCK) +/** + * idpf_ptp_is_mb_msg - Check if the message is PTP-related + * @op: virtchnl opcode + * + * Return: true if msg is PTP-related, false otherwise. + */ +static bool idpf_ptp_is_mb_msg(u32 op) +{ + switch (op) { + case VIRTCHNL2_OP_PTP_GET_DEV_CLK_TIME: + case VIRTCHNL2_OP_PTP_GET_CROSS_TIME: + case VIRTCHNL2_OP_PTP_SET_DEV_CLK_TIME: + case VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_FINE: + case VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME: + case VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS: + case VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP: + return true; + default: + return false; + } +} + +/** + * idpf_prepare_ptp_mb_msg - Prepare PTP related message + * + * @adapter: Driver specific private structure + * @op: virtchnl opcode + * @ctlq_msg: Corresponding control queue message + */ +static void idpf_prepare_ptp_mb_msg(struct idpf_adapter *adapter, u32 op, + struct idpf_ctlq_msg *ctlq_msg) +{ + /* If the message is PTP-related and the secondary mailbox is available, + * send the message through the secondary mailbox. + */ + if (!idpf_ptp_is_mb_msg(op) || !adapter->ptp->secondary_mbx.valid) + return; + + ctlq_msg->opcode = idpf_mbq_opc_send_msg_to_peer_drv; + ctlq_msg->func_id = adapter->ptp->secondary_mbx.peer_mbx_q_id; + ctlq_msg->host_id = adapter->ptp->secondary_mbx.peer_id; +} +#else /* !CONFIG_PTP_1588_CLOCK */ +static void idpf_prepare_ptp_mb_msg(struct idpf_adapter *adapter, u32 op, + struct idpf_ctlq_msg *ctlq_msg) +{ } +#endif /* CONFIG_PTP_1588_CLOCK */ + /** * idpf_send_mb_msg - Send message over mailbox * @adapter: Driver specific private structure @@ -278,6 +246,9 @@ int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op, ctlq_msg->opcode = idpf_mbq_opc_send_msg_to_cp; ctlq_msg->func_id = 0; + + idpf_prepare_ptp_mb_msg(adapter, op, ctlq_msg); + ctlq_msg->data_len = msg_size; ctlq_msg->cookie.mbx.chnl_opcode = op; ctlq_msg->cookie.mbx.chnl_retval = 0; @@ -449,8 +420,8 @@ static void idpf_vc_xn_push_free(struct idpf_vc_xn_manager *vcxn_mngr, * >= @recv_buf.iov_len, but we never overflow @@recv_buf_iov_base). < 0 for * error. 
*/ -static ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter, - const struct idpf_vc_xn_params *params) +ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter, + const struct idpf_vc_xn_params *params) { const struct kvec *send_buf = ¶ms->send_buf; struct idpf_vc_xn *xn; @@ -900,7 +871,8 @@ static int idpf_send_get_caps_msg(struct idpf_adapter *adapter) VIRTCHNL2_CAP_MACFILTER | VIRTCHNL2_CAP_SPLITQ_QSCHED | VIRTCHNL2_CAP_PROMISC | - VIRTCHNL2_CAP_LOOPBACK); + VIRTCHNL2_CAP_LOOPBACK | + VIRTCHNL2_CAP_PTP); xn_params.vc_op = VIRTCHNL2_OP_GET_CAPS; xn_params.send_buf.iov_base = ∩︀ @@ -3029,6 +3001,11 @@ restart: goto err_intr_req; } + err = idpf_ptp_init(adapter); + if (err) + pci_err(adapter->pdev, "PTP init failed, err=%pe\n", + ERR_PTR(err)); + idpf_init_avail_queues(adapter); /* Skew the delay for init tasks for each function based on fn number @@ -3091,6 +3068,7 @@ void idpf_vc_core_deinit(struct idpf_adapter *adapter) if (!remove_in_prog) idpf_vc_xn_shutdown(adapter->vcxn_mngr); + idpf_ptp_release(adapter); idpf_deinit_task(adapter); idpf_intr_rel(adapter); @@ -3158,6 +3136,7 @@ void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q) u16 rx_itr[] = {2, 8, 32, 96, 128}; struct idpf_rss_data *rss_data; u16 idx = vport->idx; + int err; vport_config = adapter->vport_config[idx]; rss_data = &vport_config->user_config.rss_data; @@ -3192,6 +3171,18 @@ void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q) idpf_vport_alloc_vec_indexes(vport); vport->crc_enable = adapter->crc_enable; + + if (!(vport_msg->vport_flags & + cpu_to_le16(VIRTCHNL2_VPORT_UPLINK_PORT))) + return; + + err = idpf_ptp_get_vport_tstamps_caps(vport); + if (err) { + pci_dbg(vport->adapter->pdev, "Tx timestamping not supported\n"); + return; + } + + INIT_WORK(&vport->tstamp_task, idpf_tstamp_task); } /** diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h index 83da5d8da56b..3522c1238ea2 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h +++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h @@ -4,6 +4,88 @@ #ifndef _IDPF_VIRTCHNL_H_ #define _IDPF_VIRTCHNL_H_ +#define IDPF_VC_XN_MIN_TIMEOUT_MSEC 2000 +#define IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC (60 * 1000) +#define IDPF_VC_XN_IDX_M GENMASK(7, 0) +#define IDPF_VC_XN_SALT_M GENMASK(15, 8) +#define IDPF_VC_XN_RING_LEN U8_MAX + +/** + * enum idpf_vc_xn_state - Virtchnl transaction status + * @IDPF_VC_XN_IDLE: not expecting a reply, ready to be used + * @IDPF_VC_XN_WAITING: expecting a reply, not yet received + * @IDPF_VC_XN_COMPLETED_SUCCESS: a reply was expected and received, buffer + * updated + * @IDPF_VC_XN_COMPLETED_FAILED: a reply was expected and received, but there + * was an error, buffer not updated + * @IDPF_VC_XN_SHUTDOWN: transaction object cannot be used, VC torn down + * @IDPF_VC_XN_ASYNC: transaction sent asynchronously and doesn't have the + * return context; a callback may be provided to handle + * return + */ +enum idpf_vc_xn_state { + IDPF_VC_XN_IDLE = 1, + IDPF_VC_XN_WAITING, + IDPF_VC_XN_COMPLETED_SUCCESS, + IDPF_VC_XN_COMPLETED_FAILED, + IDPF_VC_XN_SHUTDOWN, + IDPF_VC_XN_ASYNC, +}; + +struct idpf_vc_xn; +/* Callback for asynchronous messages */ +typedef int (*async_vc_cb) (struct idpf_adapter *, struct idpf_vc_xn *, + const struct idpf_ctlq_msg *); + +/** + * struct idpf_vc_xn - Data structure representing virtchnl transactions + * @completed: virtchnl event loop uses that to signal when a reply is + * available, uses kernel completion API + * 
@state: virtchnl event loop stores the data below, protected by the + * completion's lock. + * @reply_sz: Original size of reply, may be > reply_buf.iov_len; it will be + * truncated on its way to the receiver thread according to + * reply_buf.iov_len. + * @reply: Reference to the buffer(s) where the reply data should be written + * to. May be 0-length (then NULL address permitted) if the reply data + * should be ignored. + * @async_handler: if sent asynchronously, a callback can be provided to handle + * the reply when it's received + * @vc_op: corresponding opcode sent with this transaction + * @idx: index used as retrieval on reply receive, used for cookie + * @salt: changed every message to make unique, used for cookie + */ +struct idpf_vc_xn { + struct completion completed; + enum idpf_vc_xn_state state; + size_t reply_sz; + struct kvec reply; + async_vc_cb async_handler; + u32 vc_op; + u8 idx; + u8 salt; +}; + +/** + * struct idpf_vc_xn_params - Parameters for executing transaction + * @send_buf: kvec for send buffer + * @recv_buf: kvec for recv buffer, may be NULL, must then have zero length + * @timeout_ms: timeout to wait for reply + * @async: send message asynchronously, will not wait on completion + * @async_handler: If sent asynchronously, optional callback handler. The user + * must be careful when using async handlers as the memory for + * the recv_buf _cannot_ be on stack if this is async. + * @vc_op: virtchnl op to send + */ +struct idpf_vc_xn_params { + struct kvec send_buf; + struct kvec recv_buf; + int timeout_ms; + bool async; + async_vc_cb async_handler; + u32 vc_op; +}; + struct idpf_adapter; struct idpf_netdev_priv; struct idpf_vec_regs; @@ -11,6 +93,8 @@ struct idpf_vport; struct idpf_vport_max_q; struct idpf_vport_user_config_data; +ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter, + const struct idpf_vc_xn_params *params); int idpf_init_dflt_mbx(struct idpf_adapter *adapter); void idpf_deinit_dflt_mbx(struct idpf_adapter *adapter); int idpf_vc_core_init(struct idpf_adapter *adapter); diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c new file mode 100644 index 000000000000..bdcc54a5fb56 --- /dev/null +++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c @@ -0,0 +1,615 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2024 Intel Corporation */ + +#include "idpf.h" +#include "idpf_ptp.h" +#include "idpf_virtchnl.h" + +/** + * idpf_ptp_get_caps - Send virtchnl get ptp capabilities message + * @adapter: Driver specific private structure + * + * Send virtchnl get PTP capabilities message. + * + * Return: 0 on success, -errno on failure. 
+ */ +int idpf_ptp_get_caps(struct idpf_adapter *adapter) +{ + struct virtchnl2_ptp_get_caps *recv_ptp_caps_msg __free(kfree) = NULL; + struct virtchnl2_ptp_get_caps send_ptp_caps_msg = { + .caps = cpu_to_le32(VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME | + VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME_MB | + VIRTCHNL2_CAP_PTP_GET_CROSS_TIME | + VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME_MB | + VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK_MB | + VIRTCHNL2_CAP_PTP_TX_TSTAMPS_MB) + }; + struct idpf_vc_xn_params xn_params = { + .vc_op = VIRTCHNL2_OP_PTP_GET_CAPS, + .send_buf.iov_base = &send_ptp_caps_msg, + .send_buf.iov_len = sizeof(send_ptp_caps_msg), + .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC, + }; + struct virtchnl2_ptp_clk_adj_reg_offsets clk_adj_offsets; + struct virtchnl2_ptp_clk_reg_offsets clock_offsets; + struct idpf_ptp_secondary_mbx *scnd_mbx; + struct idpf_ptp *ptp = adapter->ptp; + enum idpf_ptp_access access_type; + u32 temp_offset; + int reply_sz; + + recv_ptp_caps_msg = kzalloc(sizeof(struct virtchnl2_ptp_get_caps), + GFP_KERNEL); + if (!recv_ptp_caps_msg) + return -ENOMEM; + + xn_params.recv_buf.iov_base = recv_ptp_caps_msg; + xn_params.recv_buf.iov_len = sizeof(*recv_ptp_caps_msg); + + reply_sz = idpf_vc_xn_exec(adapter, &xn_params); + if (reply_sz < 0) + return reply_sz; + else if (reply_sz != sizeof(*recv_ptp_caps_msg)) + return -EIO; + + ptp->caps = le32_to_cpu(recv_ptp_caps_msg->caps); + ptp->base_incval = le64_to_cpu(recv_ptp_caps_msg->base_incval); + ptp->max_adj = le32_to_cpu(recv_ptp_caps_msg->max_adj); + + scnd_mbx = &ptp->secondary_mbx; + scnd_mbx->peer_mbx_q_id = le16_to_cpu(recv_ptp_caps_msg->peer_mbx_q_id); + + /* if the ptp_mb_q_id holds invalid value (0xffff), the secondary + * mailbox is not supported. + */ + scnd_mbx->valid = scnd_mbx->peer_mbx_q_id != 0xffff; + if (scnd_mbx->valid) + scnd_mbx->peer_id = recv_ptp_caps_msg->peer_id; + + /* Determine the access type for the PTP features */ + idpf_ptp_get_features_access(adapter); + + access_type = ptp->get_dev_clk_time_access; + if (access_type != IDPF_PTP_DIRECT) + goto discipline_clock; + + clock_offsets = recv_ptp_caps_msg->clk_offsets; + + temp_offset = le32_to_cpu(clock_offsets.dev_clk_ns_l); + ptp->dev_clk_regs.dev_clk_ns_l = idpf_get_reg_addr(adapter, + temp_offset); + temp_offset = le32_to_cpu(clock_offsets.dev_clk_ns_h); + ptp->dev_clk_regs.dev_clk_ns_h = idpf_get_reg_addr(adapter, + temp_offset); + temp_offset = le32_to_cpu(clock_offsets.phy_clk_ns_l); + ptp->dev_clk_regs.phy_clk_ns_l = idpf_get_reg_addr(adapter, + temp_offset); + temp_offset = le32_to_cpu(clock_offsets.phy_clk_ns_h); + ptp->dev_clk_regs.phy_clk_ns_h = idpf_get_reg_addr(adapter, + temp_offset); + temp_offset = le32_to_cpu(clock_offsets.cmd_sync_trigger); + ptp->dev_clk_regs.cmd_sync = idpf_get_reg_addr(adapter, temp_offset); + +discipline_clock: + access_type = ptp->adj_dev_clk_time_access; + if (access_type != IDPF_PTP_DIRECT) + return 0; + + clk_adj_offsets = recv_ptp_caps_msg->clk_adj_offsets; + + /* Device clock offsets */ + temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_cmd_type); + ptp->dev_clk_regs.cmd = idpf_get_reg_addr(adapter, temp_offset); + temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_incval_l); + ptp->dev_clk_regs.incval_l = idpf_get_reg_addr(adapter, temp_offset); + temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_incval_h); + ptp->dev_clk_regs.incval_h = idpf_get_reg_addr(adapter, temp_offset); + temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_shadj_l); + ptp->dev_clk_regs.shadj_l = idpf_get_reg_addr(adapter, temp_offset); + 
temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_shadj_h); + ptp->dev_clk_regs.shadj_h = idpf_get_reg_addr(adapter, temp_offset); + + /* PHY clock offsets */ + temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_cmd_type); + ptp->dev_clk_regs.phy_cmd = idpf_get_reg_addr(adapter, temp_offset); + temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_incval_l); + ptp->dev_clk_regs.phy_incval_l = idpf_get_reg_addr(adapter, + temp_offset); + temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_incval_h); + ptp->dev_clk_regs.phy_incval_h = idpf_get_reg_addr(adapter, + temp_offset); + temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_shadj_l); + ptp->dev_clk_regs.phy_shadj_l = idpf_get_reg_addr(adapter, temp_offset); + temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_shadj_h); + ptp->dev_clk_regs.phy_shadj_h = idpf_get_reg_addr(adapter, temp_offset); + + return 0; +} + +/** + * idpf_ptp_get_dev_clk_time - Send virtchnl get device clk time message + * @adapter: Driver specific private structure + * @dev_clk_time: Pointer to the device clock structure where the value is set + * + * Send virtchnl get time message to get the time of the clock. + * + * Return: 0 on success, -errno otherwise. + */ +int idpf_ptp_get_dev_clk_time(struct idpf_adapter *adapter, + struct idpf_ptp_dev_timers *dev_clk_time) +{ + struct virtchnl2_ptp_get_dev_clk_time get_dev_clk_time_msg; + struct idpf_vc_xn_params xn_params = { + .vc_op = VIRTCHNL2_OP_PTP_GET_DEV_CLK_TIME, + .send_buf.iov_base = &get_dev_clk_time_msg, + .send_buf.iov_len = sizeof(get_dev_clk_time_msg), + .recv_buf.iov_base = &get_dev_clk_time_msg, + .recv_buf.iov_len = sizeof(get_dev_clk_time_msg), + .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC, + }; + int reply_sz; + u64 dev_time; + + reply_sz = idpf_vc_xn_exec(adapter, &xn_params); + if (reply_sz < 0) + return reply_sz; + if (reply_sz != sizeof(get_dev_clk_time_msg)) + return -EIO; + + dev_time = le64_to_cpu(get_dev_clk_time_msg.dev_time_ns); + dev_clk_time->dev_clk_time_ns = dev_time; + + return 0; +} + +/** + * idpf_ptp_set_dev_clk_time - Send virtchnl set device time message + * @adapter: Driver specific private structure + * @time: New time value + * + * Send virtchnl set time message to set the time of the clock. + * + * Return: 0 on success, -errno otherwise. + */ +int idpf_ptp_set_dev_clk_time(struct idpf_adapter *adapter, u64 time) +{ + struct virtchnl2_ptp_set_dev_clk_time set_dev_clk_time_msg = { + .dev_time_ns = cpu_to_le64(time), + }; + struct idpf_vc_xn_params xn_params = { + .vc_op = VIRTCHNL2_OP_PTP_SET_DEV_CLK_TIME, + .send_buf.iov_base = &set_dev_clk_time_msg, + .send_buf.iov_len = sizeof(set_dev_clk_time_msg), + .recv_buf.iov_base = &set_dev_clk_time_msg, + .recv_buf.iov_len = sizeof(set_dev_clk_time_msg), + .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC, + }; + int reply_sz; + + reply_sz = idpf_vc_xn_exec(adapter, &xn_params); + if (reply_sz < 0) + return reply_sz; + if (reply_sz != sizeof(set_dev_clk_time_msg)) + return -EIO; + + return 0; +} + +/** + * idpf_ptp_adj_dev_clk_time - Send virtchnl adj device clock time message + * @adapter: Driver specific private structure + * @delta: Offset in nanoseconds to adjust the time by + * + * Send virtchnl adj time message to adjust the clock by the indicated delta. + * + * Return: 0 on success, -errno otherwise. 
+ */ +int idpf_ptp_adj_dev_clk_time(struct idpf_adapter *adapter, s64 delta) +{ + struct virtchnl2_ptp_adj_dev_clk_time adj_dev_clk_time_msg = { + .delta = cpu_to_le64(delta), + }; + struct idpf_vc_xn_params xn_params = { + .vc_op = VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME, + .send_buf.iov_base = &adj_dev_clk_time_msg, + .send_buf.iov_len = sizeof(adj_dev_clk_time_msg), + .recv_buf.iov_base = &adj_dev_clk_time_msg, + .recv_buf.iov_len = sizeof(adj_dev_clk_time_msg), + .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC, + }; + int reply_sz; + + reply_sz = idpf_vc_xn_exec(adapter, &xn_params); + if (reply_sz < 0) + return reply_sz; + if (reply_sz != sizeof(adj_dev_clk_time_msg)) + return -EIO; + + return 0; +} + +/** + * idpf_ptp_adj_dev_clk_fine - Send virtchnl adj time message + * @adapter: Driver specific private structure + * @incval: Source timer increment value per clock cycle + * + * Send virtchnl adj fine message to adjust the frequency of the clock by + * incval. + * + * Return: 0 on success, -errno otherwise. + */ +int idpf_ptp_adj_dev_clk_fine(struct idpf_adapter *adapter, u64 incval) +{ + struct virtchnl2_ptp_adj_dev_clk_fine adj_dev_clk_fine_msg = { + .incval = cpu_to_le64(incval), + }; + struct idpf_vc_xn_params xn_params = { + .vc_op = VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_FINE, + .send_buf.iov_base = &adj_dev_clk_fine_msg, + .send_buf.iov_len = sizeof(adj_dev_clk_fine_msg), + .recv_buf.iov_base = &adj_dev_clk_fine_msg, + .recv_buf.iov_len = sizeof(adj_dev_clk_fine_msg), + .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC, + }; + int reply_sz; + + reply_sz = idpf_vc_xn_exec(adapter, &xn_params); + if (reply_sz < 0) + return reply_sz; + if (reply_sz != sizeof(adj_dev_clk_fine_msg)) + return -EIO; + + return 0; +} + +/** + * idpf_ptp_get_vport_tstamps_caps - Send virtchnl to get tstamps caps for vport + * @vport: Virtual port structure + * + * Send virtchnl get vport tstamps caps message to receive the set of tstamp + * capabilities per vport. + * + * Return: 0 on success, -errno otherwise. 
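+ *
+ * A minimal usage sketch (hypothetical caller shown for illustration):
+ * the capabilities are probed once per vport, and -EOPNOTSUPP simply
+ * means Tx timestamping is not offered for it:
+ *
+ *	err = idpf_ptp_get_vport_tstamps_caps(vport);
+ *	if (err == -EOPNOTSUPP)
+ *		return 0;
+ *	else if (err)
+ *		return err;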
+ */ +int idpf_ptp_get_vport_tstamps_caps(struct idpf_vport *vport) +{ + struct virtchnl2_ptp_get_vport_tx_tstamp_caps send_tx_tstamp_caps; + struct virtchnl2_ptp_get_vport_tx_tstamp_caps *rcv_tx_tstamp_caps; + struct virtchnl2_ptp_tx_tstamp_latch_caps tx_tstamp_latch_caps; + struct idpf_ptp_vport_tx_tstamp_caps *tstamp_caps; + struct idpf_ptp_tx_tstamp *ptp_tx_tstamp, *tmp; + struct idpf_vc_xn_params xn_params = { + .vc_op = VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS, + .send_buf.iov_base = &send_tx_tstamp_caps, + .send_buf.iov_len = sizeof(send_tx_tstamp_caps), + .recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN, + .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC, + }; + enum idpf_ptp_access tstamp_access, get_dev_clk_access; + struct idpf_ptp *ptp = vport->adapter->ptp; + struct list_head *head; + int err = 0, reply_sz; + u16 num_latches; + u32 size; + + if (!ptp) + return -EOPNOTSUPP; + + tstamp_access = ptp->tx_tstamp_access; + get_dev_clk_access = ptp->get_dev_clk_time_access; + if (tstamp_access == IDPF_PTP_NONE || + get_dev_clk_access == IDPF_PTP_NONE) + return -EOPNOTSUPP; + + rcv_tx_tstamp_caps = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL); + if (!rcv_tx_tstamp_caps) + return -ENOMEM; + + send_tx_tstamp_caps.vport_id = cpu_to_le32(vport->vport_id); + xn_params.recv_buf.iov_base = rcv_tx_tstamp_caps; + + reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); + if (reply_sz < 0) { + err = reply_sz; + goto get_tstamp_caps_out; + } + + num_latches = le16_to_cpu(rcv_tx_tstamp_caps->num_latches); + size = struct_size(rcv_tx_tstamp_caps, tstamp_latches, num_latches); + if (reply_sz != size) { + err = -EIO; + goto get_tstamp_caps_out; + } + + size = struct_size(tstamp_caps, tx_tstamp_status, num_latches); + tstamp_caps = kzalloc(size, GFP_KERNEL); + if (!tstamp_caps) { + err = -ENOMEM; + goto get_tstamp_caps_out; + } + + tstamp_caps->access = true; + tstamp_caps->num_entries = num_latches; + + INIT_LIST_HEAD(&tstamp_caps->latches_in_use); + INIT_LIST_HEAD(&tstamp_caps->latches_free); + + spin_lock_init(&tstamp_caps->latches_lock); + spin_lock_init(&tstamp_caps->status_lock); + + tstamp_caps->tstamp_ns_lo_bit = rcv_tx_tstamp_caps->tstamp_ns_lo_bit; + + for (u16 i = 0; i < tstamp_caps->num_entries; i++) { + __le32 offset_l, offset_h; + + ptp_tx_tstamp = kzalloc(sizeof(*ptp_tx_tstamp), GFP_KERNEL); + if (!ptp_tx_tstamp) { + err = -ENOMEM; + goto err_free_ptp_tx_stamp_list; + } + + tx_tstamp_latch_caps = rcv_tx_tstamp_caps->tstamp_latches[i]; + + if (tstamp_access != IDPF_PTP_DIRECT) + goto skip_offsets; + + offset_l = tx_tstamp_latch_caps.tx_latch_reg_offset_l; + offset_h = tx_tstamp_latch_caps.tx_latch_reg_offset_h; + ptp_tx_tstamp->tx_latch_reg_offset_l = le32_to_cpu(offset_l); + ptp_tx_tstamp->tx_latch_reg_offset_h = le32_to_cpu(offset_h); + +skip_offsets: + ptp_tx_tstamp->idx = tx_tstamp_latch_caps.index; + + list_add(&ptp_tx_tstamp->list_member, + &tstamp_caps->latches_free); + + tstamp_caps->tx_tstamp_status[i].state = IDPF_PTP_FREE; + } + + vport->tx_tstamp_caps = tstamp_caps; + kfree(rcv_tx_tstamp_caps); + + return 0; + +err_free_ptp_tx_stamp_list: + head = &tstamp_caps->latches_free; + list_for_each_entry_safe(ptp_tx_tstamp, tmp, head, list_member) { + list_del(&ptp_tx_tstamp->list_member); + kfree(ptp_tx_tstamp); + } + + kfree(tstamp_caps); +get_tstamp_caps_out: + kfree(rcv_tx_tstamp_caps); + + return err; +} + +/** + * idpf_ptp_update_tstamp_tracker - Update the Tx timestamp tracker based on + * the skb compatibility. 
+ * @caps: Tx timestamp capabilities that monitor the latch status + * @skb: skb for which the tstamp value is returned through virtchnl message + * @current_state: Current state of the Tx timestamp latch + * @expected_state: Expected state of the Tx timestamp latch + * + * Find a proper skb tracker for which the Tx timestamp is received and change + * the state to expected value. + * + * Return: true if the tracker has been found and updated, false otherwise. + */ +static bool +idpf_ptp_update_tstamp_tracker(struct idpf_ptp_vport_tx_tstamp_caps *caps, + struct sk_buff *skb, + enum idpf_ptp_tx_tstamp_state current_state, + enum idpf_ptp_tx_tstamp_state expected_state) +{ + bool updated = false; + + spin_lock(&caps->status_lock); + for (u16 i = 0; i < caps->num_entries; i++) { + struct idpf_ptp_tx_tstamp_status *status; + + status = &caps->tx_tstamp_status[i]; + + if (skb == status->skb && status->state == current_state) { + status->state = expected_state; + updated = true; + break; + } + } + spin_unlock(&caps->status_lock); + + return updated; +} + +/** + * idpf_ptp_get_tstamp_value - Get the Tx timestamp value and provide it + * back to the skb. + * @vport: Virtual port structure + * @tstamp_latch: Tx timestamp latch structure fulfilled by the Control Plane + * @ptp_tx_tstamp: Tx timestamp latch to add to the free list + * + * Read the value of the Tx timestamp for a given latch received from the + * Control Plane, extend it to 64 bit and provide back to the skb. + * + * Return: 0 on success, -errno otherwise. + */ +static int +idpf_ptp_get_tstamp_value(struct idpf_vport *vport, + struct virtchnl2_ptp_tx_tstamp_latch *tstamp_latch, + struct idpf_ptp_tx_tstamp *ptp_tx_tstamp) +{ + struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps; + struct skb_shared_hwtstamps shhwtstamps; + bool state_upd = false; + u8 tstamp_ns_lo_bit; + u64 tstamp; + + tx_tstamp_caps = vport->tx_tstamp_caps; + tstamp_ns_lo_bit = tx_tstamp_caps->tstamp_ns_lo_bit; + + ptp_tx_tstamp->tstamp = le64_to_cpu(tstamp_latch->tstamp); + ptp_tx_tstamp->tstamp >>= tstamp_ns_lo_bit; + + state_upd = idpf_ptp_update_tstamp_tracker(tx_tstamp_caps, + ptp_tx_tstamp->skb, + IDPF_PTP_READ_VALUE, + IDPF_PTP_FREE); + if (!state_upd) + return -EINVAL; + + tstamp = idpf_ptp_extend_ts(vport, ptp_tx_tstamp->tstamp); + shhwtstamps.hwtstamp = ns_to_ktime(tstamp); + skb_tstamp_tx(ptp_tx_tstamp->skb, &shhwtstamps); + consume_skb(ptp_tx_tstamp->skb); + + list_add(&ptp_tx_tstamp->list_member, + &tx_tstamp_caps->latches_free); + + return 0; +} + +/** + * idpf_ptp_get_tx_tstamp_async_handler - Async callback for getting Tx tstamps + * @adapter: Driver specific private structure + * @xn: transaction for message + * @ctlq_msg: received message + * + * Read the tstamps Tx tstamp values from a received message and put them + * directly to the skb. The number of timestamps to read is specified by + * the virtchnl message. + * + * Return: 0 on success, -errno otherwise. 
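+ *
+ * This handler is not called directly; it is registered as the async
+ * handler of the VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP transaction, as set
+ * up in idpf_ptp_get_tx_tstamp() below:
+ *
+ *	xn_params.async = true;
+ *	xn_params.async_handler = idpf_ptp_get_tx_tstamp_async_handler;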
+ */ +static int +idpf_ptp_get_tx_tstamp_async_handler(struct idpf_adapter *adapter, + struct idpf_vc_xn *xn, + const struct idpf_ctlq_msg *ctlq_msg) +{ + struct virtchnl2_ptp_get_vport_tx_tstamp_latches *recv_tx_tstamp_msg; + struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps; + struct virtchnl2_ptp_tx_tstamp_latch tstamp_latch; + struct idpf_ptp_tx_tstamp *tx_tstamp, *tmp; + struct idpf_vport *tstamp_vport = NULL; + struct list_head *head; + u16 num_latches; + u32 vport_id; + int err = 0; + + recv_tx_tstamp_msg = ctlq_msg->ctx.indirect.payload->va; + vport_id = le32_to_cpu(recv_tx_tstamp_msg->vport_id); + + idpf_for_each_vport(adapter, vport) { + if (!vport) + continue; + + if (vport->vport_id == vport_id) { + tstamp_vport = vport; + break; + } + } + + if (!tstamp_vport || !tstamp_vport->tx_tstamp_caps) + return -EINVAL; + + tx_tstamp_caps = tstamp_vport->tx_tstamp_caps; + num_latches = le16_to_cpu(recv_tx_tstamp_msg->num_latches); + + spin_lock_bh(&tx_tstamp_caps->latches_lock); + head = &tx_tstamp_caps->latches_in_use; + + for (u16 i = 0; i < num_latches; i++) { + tstamp_latch = recv_tx_tstamp_msg->tstamp_latches[i]; + + if (!tstamp_latch.valid) + continue; + + if (list_empty(head)) { + err = -ENOBUFS; + goto unlock; + } + + list_for_each_entry_safe(tx_tstamp, tmp, head, list_member) { + if (tstamp_latch.index == tx_tstamp->idx) { + list_del(&tx_tstamp->list_member); + err = idpf_ptp_get_tstamp_value(tstamp_vport, + &tstamp_latch, + tx_tstamp); + if (err) + goto unlock; + + break; + } + } + } + +unlock: + spin_unlock_bh(&tx_tstamp_caps->latches_lock); + + return err; +} + +/** + * idpf_ptp_get_tx_tstamp - Send virtchnl get Tx timestamp latches message + * @vport: Virtual port structure + * + * Send virtchnl get Tx tstamp message to read the value of the HW timestamp. + * The message contains a list of indexes set in the Tx descriptors. + * + * Return: 0 on success, -errno otherwise. 
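+ *
+ * Note that the reply is consumed asynchronously by
+ * idpf_ptp_get_tx_tstamp_async_handler(), so a return value of 0 only
+ * means the request was posted successfully; the timestamps are delivered
+ * to the skbs later. A hypothetical caller therefore only checks for a
+ * posting error:
+ *
+ *	err = idpf_ptp_get_tx_tstamp(vport);
+ *	if (err)
+ *		return err;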
+ */ +int idpf_ptp_get_tx_tstamp(struct idpf_vport *vport) +{ + struct virtchnl2_ptp_get_vport_tx_tstamp_latches *send_tx_tstamp_msg; + struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps; + struct idpf_vc_xn_params xn_params = { + .vc_op = VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP, + .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC, + .async = true, + .async_handler = idpf_ptp_get_tx_tstamp_async_handler, + }; + struct idpf_ptp_tx_tstamp *ptp_tx_tstamp; + int reply_sz, size, msg_size; + struct list_head *head; + bool state_upd; + u16 id = 0; + + tx_tstamp_caps = vport->tx_tstamp_caps; + head = &tx_tstamp_caps->latches_in_use; + + size = struct_size(send_tx_tstamp_msg, tstamp_latches, + tx_tstamp_caps->num_entries); + send_tx_tstamp_msg = kzalloc(size, GFP_KERNEL); + if (!send_tx_tstamp_msg) + return -ENOMEM; + + spin_lock_bh(&tx_tstamp_caps->latches_lock); + list_for_each_entry(ptp_tx_tstamp, head, list_member) { + u8 idx; + + state_upd = idpf_ptp_update_tstamp_tracker(tx_tstamp_caps, + ptp_tx_tstamp->skb, + IDPF_PTP_REQUEST, + IDPF_PTP_READ_VALUE); + if (!state_upd) + continue; + + idx = ptp_tx_tstamp->idx; + send_tx_tstamp_msg->tstamp_latches[id].index = idx; + id++; + } + spin_unlock_bh(&tx_tstamp_caps->latches_lock); + + msg_size = struct_size(send_tx_tstamp_msg, tstamp_latches, id); + send_tx_tstamp_msg->vport_id = cpu_to_le32(vport->vport_id); + send_tx_tstamp_msg->num_latches = cpu_to_le16(id); + xn_params.send_buf.iov_base = send_tx_tstamp_msg; + xn_params.send_buf.iov_len = msg_size; + + reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); + kfree(send_tx_tstamp_msg); + + return min(reply_sz, 0); +} diff --git a/drivers/net/ethernet/intel/idpf/virtchnl2.h b/drivers/net/ethernet/intel/idpf/virtchnl2.h index 63deb120359c..11b8f6f05799 100644 --- a/drivers/net/ethernet/intel/idpf/virtchnl2.h +++ b/drivers/net/ethernet/intel/idpf/virtchnl2.h @@ -68,6 +68,16 @@ enum virtchnl2_op { VIRTCHNL2_OP_ADD_MAC_ADDR = 535, VIRTCHNL2_OP_DEL_MAC_ADDR = 536, VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE = 537, + + /* TimeSync opcodes */ + VIRTCHNL2_OP_PTP_GET_CAPS = 541, + VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP = 542, + VIRTCHNL2_OP_PTP_GET_DEV_CLK_TIME = 543, + VIRTCHNL2_OP_PTP_GET_CROSS_TIME = 544, + VIRTCHNL2_OP_PTP_SET_DEV_CLK_TIME = 545, + VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_FINE = 546, + VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME = 547, + VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS = 548, }; /** @@ -560,6 +570,14 @@ struct virtchnl2_queue_reg_chunks { VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_queue_reg_chunks); /** + * enum virtchnl2_vport_flags - Vport flags that indicate vport capabilities. + * @VIRTCHNL2_VPORT_UPLINK_PORT: Representatives of underlying physical ports + */ +enum virtchnl2_vport_flags { + VIRTCHNL2_VPORT_UPLINK_PORT = BIT(0), +}; + +/** * struct virtchnl2_create_vport - Create vport config info. * @vport_type: See enum virtchnl2_vport_type. * @txq_model: See virtchnl2_queue_model. @@ -577,7 +595,7 @@ VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_queue_reg_chunks); * @max_mtu: Max MTU. CP populates this field on response. * @vport_id: Vport id. CP populates this field on response. * @default_mac_addr: Default MAC address. - * @pad: Padding. + * @vport_flags: See enum virtchnl2_vport_flags. * @rx_desc_ids: See VIRTCHNL2_RX_DESC_IDS definitions. * @tx_desc_ids: See VIRTCHNL2_TX_DESC_IDS definitions. * @pad1: Padding. 
@@ -610,7 +628,7 @@ struct virtchnl2_create_vport { __le16 max_mtu; __le32 vport_id; u8 default_mac_addr[ETH_ALEN]; - __le16 pad; + __le16 vport_flags; __le64 rx_desc_ids; __le64 tx_desc_ids; u8 pad1[72]; @@ -1270,4 +1288,296 @@ struct virtchnl2_promisc_info { }; VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_promisc_info); +/** + * enum virtchnl2_ptp_caps - PTP capabilities + * @VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME: direct access to get the time of + * device clock + * @VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME_MB: mailbox access to get the time of + * device clock + * @VIRTCHNL2_CAP_PTP_GET_CROSS_TIME: direct access to cross timestamp + * @VIRTCHNL2_CAP_PTP_GET_CROSS_TIME_MB: mailbox access to cross timestamp + * @VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME: direct access to set the time of + * device clock + * @VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME_MB: mailbox access to set the time of + * device clock + * @VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK: direct access to adjust the time of device + * clock + * @VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK_MB: mailbox access to adjust the time of + * device clock + * @VIRTCHNL2_CAP_PTP_TX_TSTAMPS: direct access to the Tx timestamping + * @VIRTCHNL2_CAP_PTP_TX_TSTAMPS_MB: mailbox access to the Tx timestamping + * + * PF/VF negotiates a set of supported PTP capabilities with the Control Plane. + * There are two access methods - mailbox (_MB) and direct. + * PTP capabilities enables Main Timer operations: get/set/adjust Main Timer, + * cross timestamping and the Tx timestamping. + */ +enum virtchnl2_ptp_caps { + VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME = BIT(0), + VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME_MB = BIT(1), + VIRTCHNL2_CAP_PTP_GET_CROSS_TIME = BIT(2), + VIRTCHNL2_CAP_PTP_GET_CROSS_TIME_MB = BIT(3), + VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME = BIT(4), + VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME_MB = BIT(5), + VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK = BIT(6), + VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK_MB = BIT(7), + VIRTCHNL2_CAP_PTP_TX_TSTAMPS = BIT(8), + VIRTCHNL2_CAP_PTP_TX_TSTAMPS_MB = BIT(9), +}; + +/** + * struct virtchnl2_ptp_clk_reg_offsets - Offsets of device and PHY clocks + * registers. + * @dev_clk_ns_l: Device clock low register offset + * @dev_clk_ns_h: Device clock high register offset + * @phy_clk_ns_l: PHY clock low register offset + * @phy_clk_ns_h: PHY clock high register offset + * @cmd_sync_trigger: The command sync trigger register offset + * @pad: Padding for future extensions + */ +struct virtchnl2_ptp_clk_reg_offsets { + __le32 dev_clk_ns_l; + __le32 dev_clk_ns_h; + __le32 phy_clk_ns_l; + __le32 phy_clk_ns_h; + __le32 cmd_sync_trigger; + u8 pad[4]; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_ptp_clk_reg_offsets); + +/** + * struct virtchnl2_ptp_cross_time_reg_offsets - Offsets of the device cross + * time registers. + * @sys_time_ns_l: System time low register offset + * @sys_time_ns_h: System time high register offset + * @cmd_sync_trigger: The command sync trigger register offset + * @pad: Padding for future extensions + */ +struct virtchnl2_ptp_cross_time_reg_offsets { + __le32 sys_time_ns_l; + __le32 sys_time_ns_h; + __le32 cmd_sync_trigger; + u8 pad[4]; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_cross_time_reg_offsets); + +/** + * struct virtchnl2_ptp_clk_adj_reg_offsets - Offsets of device and PHY clocks + * adjustments registers. 
+ * @dev_clk_cmd_type: Device clock command type register offset
+ * @dev_clk_incval_l: Device clock increment value low register offset
+ * @dev_clk_incval_h: Device clock increment value high register offset
+ * @dev_clk_shadj_l: Device clock shadow adjust low register offset
+ * @dev_clk_shadj_h: Device clock shadow adjust high register offset
+ * @phy_clk_cmd_type: PHY timer command type register offset
+ * @phy_clk_incval_l: PHY timer increment value low register offset
+ * @phy_clk_incval_h: PHY timer increment value high register offset
+ * @phy_clk_shadj_l: PHY timer shadow adjust low register offset
+ * @phy_clk_shadj_h: PHY timer shadow adjust high register offset
+ */
+struct virtchnl2_ptp_clk_adj_reg_offsets {
+	__le32 dev_clk_cmd_type;
+	__le32 dev_clk_incval_l;
+	__le32 dev_clk_incval_h;
+	__le32 dev_clk_shadj_l;
+	__le32 dev_clk_shadj_h;
+	__le32 phy_clk_cmd_type;
+	__le32 phy_clk_incval_l;
+	__le32 phy_clk_incval_h;
+	__le32 phy_clk_shadj_l;
+	__le32 phy_clk_shadj_h;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(40, virtchnl2_ptp_clk_adj_reg_offsets);
+
+/**
+ * struct virtchnl2_ptp_tx_tstamp_latch_caps - PTP Tx timestamp latch
+ * capabilities.
+ * @tx_latch_reg_offset_l: Tx timestamp latch low register offset
+ * @tx_latch_reg_offset_h: Tx timestamp latch high register offset
+ * @index: Latch index provided to the Tx descriptor
+ * @pad: Padding for future extensions
+ */
+struct virtchnl2_ptp_tx_tstamp_latch_caps {
+	__le32 tx_latch_reg_offset_l;
+	__le32 tx_latch_reg_offset_h;
+	u8 index;
+	u8 pad[7];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_tx_tstamp_latch_caps);
+
+/**
+ * struct virtchnl2_ptp_get_vport_tx_tstamp_caps - Structure that defines Tx
+ * tstamp entries.
+ * @vport_id: Vport number
+ * @num_latches: Total number of latches
+ * @tstamp_ns_lo_bit: First bit for nanosecond part of the timestamp
+ * @tstamp_ns_hi_bit: Last bit for nanosecond part of the timestamp
+ * @pad: Padding for future tstamp granularity extensions
+ * @tstamp_latches: Capabilities of Tx timestamp entries
+ *
+ * PF/VF sends this message to negotiate the Tx timestamp latches for each
+ * Vport.
+ *
+ * Associated with VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS.
+ */
+struct virtchnl2_ptp_get_vport_tx_tstamp_caps {
+	__le32 vport_id;
+	__le16 num_latches;
+	u8 tstamp_ns_lo_bit;
+	u8 tstamp_ns_hi_bit;
+	u8 pad[8];
+
+	struct virtchnl2_ptp_tx_tstamp_latch_caps tstamp_latches[]
+		__counted_by_le(num_latches);
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_get_vport_tx_tstamp_caps);
+
+/**
+ * struct virtchnl2_ptp_get_caps - Get PTP capabilities
+ * @caps: PTP capability bitmap. See enum virtchnl2_ptp_caps
+ * @max_adj: The maximum possible frequency adjustment
+ * @base_incval: The default timer increment value
+ * @peer_mbx_q_id: ID of the PTP Device Control daemon queue
+ * @peer_id: Peer ID for PTP Device Control daemon
+ * @secondary_mbx: Indicates to the driver that it should create a secondary
+ *		   mailbox to interact with control plane for PTP
+ * @pad: Padding for future extensions
+ * @clk_offsets: Main timer and PHY register offsets
+ * @cross_time_offsets: Cross time register offsets
+ * @clk_adj_offsets: Offsets needed to adjust the PHY and the main timer
+ *
+ * PF/VF sends this message to negotiate PTP capabilities. CP updates the
+ * bitmap with supported features and fills in the appropriate structures.
+ * If HW uses primary MBX for PTP: secondary_mbx is set to false.
+ * If HW uses secondary MBX for PTP: secondary_mbx is set to true.
+ * The Control Plane owns two mailboxes while the driver has one; in that
+ * case the send-to-peer-driver mechanism may be used to send a message,
+ * using the valid peer_mbx_q_id and peer_id.
+ * If HW does not use send-to-peer-driver: secondary_mbx is a don't care
+ * field and peer_mbx_q_id holds the invalid value (0xFFFF).
+ *
+ * Associated with VIRTCHNL2_OP_PTP_GET_CAPS.
+ */
+struct virtchnl2_ptp_get_caps {
+	__le32 caps;
+	__le32 max_adj;
+	__le64 base_incval;
+	__le16 peer_mbx_q_id;
+	u8 peer_id;
+	u8 secondary_mbx;
+	u8 pad[4];
+
+	struct virtchnl2_ptp_clk_reg_offsets clk_offsets;
+	struct virtchnl2_ptp_cross_time_reg_offsets cross_time_offsets;
+	struct virtchnl2_ptp_clk_adj_reg_offsets clk_adj_offsets;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(104, virtchnl2_ptp_get_caps);
+
+/**
+ * struct virtchnl2_ptp_tx_tstamp_latch - Structure that describes Tx tstamp
+ * values, index and validity.
+ * @tstamp: Timestamp value
+ * @index: Timestamp index from which the value is read
+ * @valid: Timestamp validity
+ * @pad: Padding for future extensions
+ */
+struct virtchnl2_ptp_tx_tstamp_latch {
+	__le64 tstamp;
+	u8 index;
+	u8 valid;
+	u8 pad[6];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_tx_tstamp_latch);
+
+/**
+ * struct virtchnl2_ptp_get_vport_tx_tstamp_latches - Tx timestamp latches
+ * associated with the vport.
+ * @vport_id: Number of vport that requests the timestamp
+ * @num_latches: Number of latches
+ * @get_devtime_with_txtstmp: Flag to request device time along with Tx timestamp
+ * @pad: Padding for future extensions
+ * @device_time: Device time if get_devtime_with_txtstmp was set in the request
+ * @tstamp_latches: PTP Tx timestamp latches
+ *
+ * PF/VF sends this message to receive a specified number of timestamp
+ * entries.
+ *
+ * Associated with VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP.
+ */
+struct virtchnl2_ptp_get_vport_tx_tstamp_latches {
+	__le32 vport_id;
+	__le16 num_latches;
+	u8 get_devtime_with_txtstmp;
+	u8 pad[1];
+	__le64 device_time;
+
+	struct virtchnl2_ptp_tx_tstamp_latch tstamp_latches[]
+		__counted_by_le(num_latches);
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_get_vport_tx_tstamp_latches);
+
+/**
+ * struct virtchnl2_ptp_get_dev_clk_time - Associated with message
+ * VIRTCHNL2_OP_PTP_GET_DEV_CLK_TIME.
+ * @dev_time_ns: Device clock time value in nanoseconds
+ *
+ * PF/VF sends this message to receive the time from the main timer.
+ */
+struct virtchnl2_ptp_get_dev_clk_time {
+	__le64 dev_time_ns;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_ptp_get_dev_clk_time);
+
+/**
+ * struct virtchnl2_ptp_get_cross_time: Associated with message
+ * VIRTCHNL2_OP_PTP_GET_CROSS_TIME.
+ * @sys_time_ns: System counter value expressed in nanoseconds, read
+ *		 synchronously with device time
+ * @dev_time_ns: Device clock time value expressed in nanoseconds
+ *
+ * PF/VF sends this message to receive the cross time.
+ */
+struct virtchnl2_ptp_get_cross_time {
+	__le64 sys_time_ns;
+	__le64 dev_time_ns;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_get_cross_time);
+
+/**
+ * struct virtchnl2_ptp_set_dev_clk_time: Associated with message
+ * VIRTCHNL2_OP_PTP_SET_DEV_CLK_TIME.
+ * @dev_time_ns: Device time value expressed in nanoseconds to set
+ *
+ * PF/VF sends this message to set the time of the main timer.
+ */
+struct virtchnl2_ptp_set_dev_clk_time {
+	__le64 dev_time_ns;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_ptp_set_dev_clk_time);
+
+/**
+ * struct virtchnl2_ptp_adj_dev_clk_fine: Associated with message
+ * VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_FINE.
+ * @incval: Source timer increment value per clock cycle + * + * PF/VF sends this message to adjust the frequency of the main timer by the + * indicated increment value. + */ +struct virtchnl2_ptp_adj_dev_clk_fine { + __le64 incval; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_ptp_adj_dev_clk_fine); + +/** + * struct virtchnl2_ptp_adj_dev_clk_time: Associated with message + * VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME. + * @delta: Offset in nanoseconds to adjust the time by + * + * PF/VF sends this message to adjust the time of the main timer by the delta. + */ +struct virtchnl2_ptp_adj_dev_clk_time { + __le64 delta; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_ptp_adj_dev_clk_time); + #endif /* _VIRTCHNL_2_H_ */ diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index 02f340280d20..f34ead8243e9 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -391,7 +391,8 @@ enum e1000_ring_flags_t { IGB_RING_FLAG_RX_LB_VLAN_BSWAP, IGB_RING_FLAG_TX_CTX_IDX, IGB_RING_FLAG_TX_DETECT_HANG, - IGB_RING_FLAG_TX_DISABLED + IGB_RING_FLAG_TX_DISABLED, + IGB_RING_FLAG_RX_ALLOC_FAILED, }; #define ring_uses_large_buffer(ring) \ @@ -722,6 +723,8 @@ enum igb_boards { extern char igb_driver_name[]; +void igb_set_queue_napi(struct igb_adapter *adapter, int q_idx, + struct napi_struct *napi); int igb_xmit_xdp_ring(struct igb_adapter *adapter, struct igb_ring *ring, struct xdp_frame *xdpf); diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index c646c71915f0..9e9a5900e6e5 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -947,6 +947,9 @@ static int igb_request_msix(struct igb_adapter *adapter) q_vector); if (err) goto err_free; + + netif_napi_set_irq(&q_vector->napi, + adapter->msix_entries[vector].vector); } igb_configure_msix(adapter); @@ -1194,7 +1197,8 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter, return -ENOMEM; /* initialize NAPI */ - netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll); + netif_napi_add_config(adapter->netdev, &q_vector->napi, igb_poll, + v_idx); /* tie q_vector and adapter together */ adapter->q_vector[v_idx] = q_vector; @@ -2096,6 +2100,22 @@ static void igb_check_swap_media(struct igb_adapter *adapter) wr32(E1000_CTRL_EXT, ctrl_ext); } +void igb_set_queue_napi(struct igb_adapter *adapter, int vector, + struct napi_struct *napi) +{ + struct igb_q_vector *q_vector = adapter->q_vector[vector]; + + if (q_vector->rx.ring) + netif_queue_set_napi(adapter->netdev, + q_vector->rx.ring->queue_index, + NETDEV_QUEUE_TYPE_RX, napi); + + if (q_vector->tx.ring) + netif_queue_set_napi(adapter->netdev, + q_vector->tx.ring->queue_index, + NETDEV_QUEUE_TYPE_TX, napi); +} + /** * igb_up - Open the interface and prepare it to handle traffic * @adapter: board private structure @@ -2103,6 +2123,7 @@ static void igb_check_swap_media(struct igb_adapter *adapter) int igb_up(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; + struct napi_struct *napi; int i; /* hardware has been reset, we need to reload some things */ @@ -2110,8 +2131,11 @@ int igb_up(struct igb_adapter *adapter) clear_bit(__IGB_DOWN, &adapter->state); - for (i = 0; i < adapter->num_q_vectors; i++) - napi_enable(&(adapter->q_vector[i]->napi)); + for (i = 0; i < adapter->num_q_vectors; i++) { + napi = &adapter->q_vector[i]->napi; + napi_enable(napi); + igb_set_queue_napi(adapter, i, napi); + } if (adapter->flags & IGB_FLAG_HAS_MSIX) 
igb_configure_msix(adapter); @@ -2181,6 +2205,7 @@ void igb_down(struct igb_adapter *adapter) for (i = 0; i < adapter->num_q_vectors; i++) { if (adapter->q_vector[i]) { napi_synchronize(&adapter->q_vector[i]->napi); + igb_set_queue_napi(adapter, i, NULL); napi_disable(&adapter->q_vector[i]->napi); } } @@ -4113,8 +4138,9 @@ static int igb_sw_init(struct igb_adapter *adapter) static int __igb_open(struct net_device *netdev, bool resuming) { struct igb_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; struct pci_dev *pdev = adapter->pdev; + struct e1000_hw *hw = &adapter->hw; + struct napi_struct *napi; int err; int i; @@ -4166,8 +4192,11 @@ static int __igb_open(struct net_device *netdev, bool resuming) /* From here on the code is the same as igb_up() */ clear_bit(__IGB_DOWN, &adapter->state); - for (i = 0; i < adapter->num_q_vectors; i++) - napi_enable(&(adapter->q_vector[i]->napi)); + for (i = 0; i < adapter->num_q_vectors; i++) { + napi = &adapter->q_vector[i]->napi; + napi_enable(napi); + igb_set_queue_napi(adapter, i, napi); + } /* Clear any pending interrupts. */ rd32(E1000_TSICR); @@ -5726,11 +5755,29 @@ no_wait: if (adapter->flags & IGB_FLAG_HAS_MSIX) { u32 eics = 0; - for (i = 0; i < adapter->num_q_vectors; i++) - eics |= adapter->q_vector[i]->eims_value; - wr32(E1000_EICS, eics); + for (i = 0; i < adapter->num_q_vectors; i++) { + struct igb_q_vector *q_vector = adapter->q_vector[i]; + struct igb_ring *rx_ring; + + if (!q_vector->rx.ring) + continue; + + rx_ring = adapter->rx_ring[q_vector->rx.ring->queue_index]; + + if (test_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags)) { + eics |= q_vector->eims_value; + clear_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags); + } + } + if (eics) + wr32(E1000_EICS, eics); } else { - wr32(E1000_ICS, E1000_ICS_RXDMT0); + struct igb_ring *rx_ring = adapter->rx_ring[0]; + + if (test_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags)) { + clear_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags); + wr32(E1000_ICS, E1000_ICS_RXDMT0); + } } igb_spoof_check(adapter); @@ -9061,6 +9108,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) if (!xdp_res && !skb) { rx_ring->rx_stats.alloc_failed++; rx_buffer->pagecnt_bias++; + set_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags); break; } @@ -9120,6 +9168,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring, page = dev_alloc_pages(igb_rx_pg_order(rx_ring)); if (unlikely(!page)) { rx_ring->rx_stats.alloc_failed++; + set_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags); return false; } @@ -9136,6 +9185,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring, __free_pages(page, igb_rx_pg_order(rx_ring)); rx_ring->rx_stats.alloc_failed++; + set_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags); return false; } @@ -9674,8 +9724,11 @@ static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev, if (state == pci_channel_io_perm_failure) return PCI_ERS_RESULT_DISCONNECT; + rtnl_lock(); if (netif_running(netdev)) igb_down(adapter); + rtnl_unlock(); + pci_disable_device(pdev); /* Request a slot reset. 
*/ @@ -9734,16 +9787,21 @@ static void igb_io_resume(struct pci_dev *pdev) struct net_device *netdev = pci_get_drvdata(pdev); struct igb_adapter *adapter = netdev_priv(netdev); + rtnl_lock(); if (netif_running(netdev)) { if (!test_bit(__IGB_DOWN, &adapter->state)) { dev_dbg(&pdev->dev, "Resuming from non-fatal error, do nothing.\n"); + rtnl_unlock(); return; } + if (igb_up(adapter)) { dev_err(&pdev->dev, "igb_up failed after reset\n"); + rtnl_unlock(); return; } } + rtnl_unlock(); netif_device_attach(netdev); diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index f323e1c1989f..793c96016288 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -502,13 +502,6 @@ static int igb_ptp_feature_enable_82580(struct ptp_clock_info *ptp, switch (rq->type) { case PTP_CLK_REQ_EXTTS: - /* Reject requests with unsupported flags */ - if (rq->extts.flags & ~(PTP_ENABLE_FEATURE | - PTP_RISING_EDGE | - PTP_FALLING_EDGE | - PTP_STRICT_FLAGS)) - return -EOPNOTSUPP; - /* Both the rising and falling edge are timestamped */ if (rq->extts.flags & PTP_STRICT_FLAGS && (rq->extts.flags & PTP_ENABLE_FEATURE) && @@ -658,13 +651,6 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp, switch (rq->type) { case PTP_CLK_REQ_EXTTS: - /* Reject requests with unsupported flags */ - if (rq->extts.flags & ~(PTP_ENABLE_FEATURE | - PTP_RISING_EDGE | - PTP_FALLING_EDGE | - PTP_STRICT_FLAGS)) - return -EOPNOTSUPP; - /* Reject requests failing to enable both edges. */ if ((rq->extts.flags & PTP_STRICT_FLAGS) && (rq->extts.flags & PTP_ENABLE_FEATURE) && @@ -1356,6 +1342,9 @@ void igb_ptp_init(struct igb_adapter *adapter) adapter->ptp_caps.n_per_out = IGB_N_PEROUT; adapter->ptp_caps.n_pins = IGB_N_SDP; adapter->ptp_caps.pps = 0; + adapter->ptp_caps.supported_extts_flags = PTP_RISING_EDGE | + PTP_FALLING_EDGE | + PTP_STRICT_FLAGS; adapter->ptp_caps.pin_config = adapter->sdp_config; adapter->ptp_caps.adjfine = igb_ptp_adjfine_82580; adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576; @@ -1378,6 +1367,9 @@ void igb_ptp_init(struct igb_adapter *adapter) adapter->ptp_caps.n_ext_ts = IGB_N_EXTTS; adapter->ptp_caps.n_per_out = IGB_N_PEROUT; adapter->ptp_caps.n_pins = IGB_N_SDP; + adapter->ptp_caps.supported_extts_flags = PTP_RISING_EDGE | + PTP_FALLING_EDGE | + PTP_STRICT_FLAGS; adapter->ptp_caps.pps = 1; adapter->ptp_caps.pin_config = adapter->sdp_config; adapter->ptp_caps.adjfine = igb_ptp_adjfine_82580; diff --git a/drivers/net/ethernet/intel/igb/igb_xsk.c b/drivers/net/ethernet/intel/igb/igb_xsk.c index 157d43787fa0..5cf67ba29269 100644 --- a/drivers/net/ethernet/intel/igb/igb_xsk.c +++ b/drivers/net/ethernet/intel/igb/igb_xsk.c @@ -415,6 +415,7 @@ int igb_clean_rx_irq_zc(struct igb_q_vector *q_vector, /* exit if we failed to retrieve a buffer */ if (!skb) { rx_ring->rx_stats.alloc_failed++; + set_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags); break; } diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index 2f265c0959c7..859a15e4ccba 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -40,6 +40,11 @@ void igc_ethtool_set_ops(struct net_device *); #define IGC_MAX_TX_TSTAMP_REGS 4 +struct igc_fpe_t { + struct ethtool_mmsv mmsv; + u32 tx_min_frag_size; +}; + enum igc_mac_filter_type { IGC_MAC_FILTER_TYPE_DST = 0, IGC_MAC_FILTER_TYPE_SRC @@ -333,6 +338,8 @@ struct igc_adapter { struct timespec64 period; } perout[IGC_N_PEROUT]; + struct 
igc_fpe_t fpe; + /* LEDs */ struct mutex led_mutex; struct igc_led_classdev *leds; @@ -387,11 +394,11 @@ extern char igc_driver_name[]; #define IGC_FLAG_RX_LEGACY BIT(16) #define IGC_FLAG_TSN_QBV_ENABLED BIT(17) #define IGC_FLAG_TSN_QAV_ENABLED BIT(18) -#define IGC_FLAG_TSN_LEGACY_ENABLED BIT(19) +#define IGC_FLAG_TSN_PREEMPT_ENABLED BIT(19) #define IGC_FLAG_TSN_ANY_ENABLED \ (IGC_FLAG_TSN_QBV_ENABLED | IGC_FLAG_TSN_QAV_ENABLED | \ - IGC_FLAG_TSN_LEGACY_ENABLED) + IGC_FLAG_TSN_PREEMPT_ENABLED) #define IGC_FLAG_RSS_FIELD_IPV4_UDP BIT(6) #define IGC_FLAG_RSS_FIELD_IPV6_UDP BIT(7) @@ -736,7 +743,10 @@ struct igc_nfc_rule *igc_get_nfc_rule(struct igc_adapter *adapter, u32 location); int igc_add_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule); void igc_del_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule); - +void igc_disable_empty_addr_recv(struct igc_adapter *adapter); +int igc_enable_empty_addr_recv(struct igc_adapter *adapter); +struct igc_ring *igc_get_tx_ring(struct igc_adapter *adapter, int cpu); +void igc_flush_tx_descriptors(struct igc_ring *ring); void igc_ptp_init(struct igc_adapter *adapter); void igc_ptp_reset(struct igc_adapter *adapter); void igc_ptp_suspend(struct igc_adapter *adapter); diff --git a/drivers/net/ethernet/intel/igc/igc_base.h b/drivers/net/ethernet/intel/igc/igc_base.h index bf8cdfbba9ff..6320eabb72fe 100644 --- a/drivers/net/ethernet/intel/igc/igc_base.h +++ b/drivers/net/ethernet/intel/igc/igc_base.h @@ -49,6 +49,7 @@ struct igc_adv_tx_context_desc { #define IGC_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ #define IGC_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ #define IGC_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ +#define IGC_ADVTXD_PAYLEN_MASK 0XFFFFC000 /* Adv desc PAYLEN mask */ #define IGC_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ #define IGC_RAR_ENTRIES 16 diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h index d19325b0e6e0..7189dfc389ad 100644 --- a/drivers/net/ethernet/intel/igc/igc_defines.h +++ b/drivers/net/ethernet/intel/igc/igc_defines.h @@ -308,6 +308,8 @@ #define IGC_TXD_DTYP_C 0x00000000 /* Context Descriptor */ #define IGC_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ #define IGC_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define IGC_TXD_POPTS_SMD_MASK 0x3000 /* Indicates whether it's SMD-V or SMD-R */ + #define IGC_TXD_CMD_EOP 0x01000000 /* End of Packet */ #define IGC_TXD_CMD_IC 0x04000000 /* Insert Checksum */ #define IGC_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */ @@ -363,6 +365,8 @@ #define IGC_SRRCTL_TIMER0SEL(timer) (((timer) & 0x3) << 17) /* Receive Descriptor bit definitions */ +#define IGC_RXD_STAT_SMD_TYPE_V 0x01 /* SMD-V Packet */ +#define IGC_RXD_STAT_SMD_TYPE_R 0x02 /* SMD-R Packet */ #define IGC_RXD_STAT_EOP 0x02 /* End of Packet */ #define IGC_RXD_STAT_IXSM 0x04 /* Ignore checksum */ #define IGC_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ @@ -372,7 +376,8 @@ #define IGC_RXDEXT_STATERR_LB 0x00040000 /* Advanced Receive Descriptor bit definitions */ -#define IGC_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */ +#define IGC_RXDADV_STAT_SMD_TYPE_MASK 0x06000 +#define IGC_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */ #define IGC_RXDEXT_STATERR_L4E 0x20000000 #define IGC_RXDEXT_STATERR_IPE 0x40000000 @@ -396,11 +401,47 @@ #define IGC_RCTL_PMCF 0x00800000 /* pass MAC control frames */ #define IGC_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ -#define I225_RXPBSIZE_DEFAULT 0x000000A2 
/* RXPBSIZE default */ -#define I225_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */ -#define IGC_RXPBS_CFG_TS_EN 0x80000000 /* Timestamp in Rx buffer */ - -#define IGC_TXPBSIZE_TSN 0x04145145 /* 5k bytes buffer for each queue */ +/* Mask for RX packet buffer size */ +#define IGC_RXPBSIZE_EXP_MASK GENMASK(5, 0) +#define IGC_BMC2OSPBSIZE_MASK GENMASK(11, 6) +#define IGC_RXPBSIZE_BE_MASK GENMASK(17, 12) +/* Mask for timestamp in RX buffer */ +#define IGC_RXPBS_CFG_TS_EN_MASK GENMASK(31, 31) +/* High-priority RX packet buffer size (KB). Used for Express traffic when preemption is enabled */ +#define IGC_RXPBSIZE_EXP(x) FIELD_PREP(IGC_RXPBSIZE_EXP_MASK, (x)) +/* BMC to OS packet buffer size in KB */ +#define IGC_BMC2OSPBSIZE(x) FIELD_PREP(IGC_BMC2OSPBSIZE_MASK, (x)) +/* Low-priority RX packet buffer size (KB). Used for BE traffic when preemption is enabled */ +#define IGC_RXPBSIZE_BE(x) FIELD_PREP(IGC_RXPBSIZE_BE_MASK, (x)) +/* Enable RX packet buffer for timestamp descriptor, saving 16 bytes per packet if set */ +#define IGC_RXPBS_CFG_TS_EN FIELD_PREP(IGC_RXPBS_CFG_TS_EN_MASK, 1) +/* Default value following I225/I226 SW User Manual Section 8.3.1 */ +#define IGC_RXPBSIZE_EXP_BMC_DEFAULT ( \ + IGC_RXPBSIZE_EXP(34) | IGC_BMC2OSPBSIZE(2)) +#define IGC_RXPBSIZE_EXP_BMC_BE_TSN ( \ + IGC_RXPBSIZE_EXP(15) | IGC_BMC2OSPBSIZE(2) | IGC_RXPBSIZE_BE(15)) + +/* Mask for TX packet buffer size */ +#define IGC_TXPB0SIZE_MASK GENMASK(5, 0) +#define IGC_TXPB1SIZE_MASK GENMASK(11, 6) +#define IGC_TXPB2SIZE_MASK GENMASK(17, 12) +#define IGC_TXPB3SIZE_MASK GENMASK(23, 18) +/* Mask for OS to BMC packet buffer size */ +#define IGC_OS2BMCPBSIZE_MASK GENMASK(29, 24) +/* TX Packet buffer size in KB */ +#define IGC_TXPB0SIZE(x) FIELD_PREP(IGC_TXPB0SIZE_MASK, (x)) +#define IGC_TXPB1SIZE(x) FIELD_PREP(IGC_TXPB1SIZE_MASK, (x)) +#define IGC_TXPB2SIZE(x) FIELD_PREP(IGC_TXPB2SIZE_MASK, (x)) +#define IGC_TXPB3SIZE(x) FIELD_PREP(IGC_TXPB3SIZE_MASK, (x)) +/* OS to BMC packet buffer size in KB */ +#define IGC_OS2BMCPBSIZE(x) FIELD_PREP(IGC_OS2BMCPBSIZE_MASK, (x)) +/* Default value following I225/I226 SW User Manual Section 8.3.2 */ +#define IGC_TXPBSIZE_DEFAULT ( \ + IGC_TXPB0SIZE(20) | IGC_TXPB1SIZE(0) | IGC_TXPB2SIZE(0) | \ + IGC_TXPB3SIZE(0) | IGC_OS2BMCPBSIZE(4)) +#define IGC_TXPBSIZE_TSN ( \ + IGC_TXPB0SIZE(7) | IGC_TXPB1SIZE(7) | IGC_TXPB2SIZE(7) | \ + IGC_TXPB3SIZE(7) | IGC_OS2BMCPBSIZE(4)) #define IGC_DTXMXPKTSZ_TSN 0x19 /* 1600 bytes of max TX DMA packet size */ #define IGC_DTXMXPKTSZ_DEFAULT 0x98 /* 9728-byte Jumbo frames */ @@ -539,8 +580,10 @@ /* Transmit Scheduling */ #define IGC_TQAVCTRL_TRANSMIT_MODE_TSN 0x00000001 +#define IGC_TQAVCTRL_PREEMPT_ENA 0x00000002 #define IGC_TQAVCTRL_ENHANCED_QAV 0x00000008 #define IGC_TQAVCTRL_FUTSCDDIS 0x00000080 +#define IGC_TQAVCTRL_MIN_FRAG_MASK 0x0000C000 #define IGC_TXQCTL_QUEUE_MODE_LAUNCHT 0x00000001 #define IGC_TXQCTL_STRICT_CYCLE 0x00000002 diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c index 817838677817..3fc1eded9605 100644 --- a/drivers/net/ethernet/intel/igc/igc_ethtool.c +++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c @@ -8,6 +8,7 @@ #include "igc.h" #include "igc_diag.h" +#include "igc_tsn.h" /* forward declaration */ struct igc_stats { @@ -1781,6 +1782,83 @@ static int igc_ethtool_set_eee(struct net_device *netdev, return 0; } +static int igc_ethtool_get_mm(struct net_device *netdev, + struct ethtool_mm_state *cmd) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_fpe_t *fpe = 
&adapter->fpe; + + ethtool_mmsv_get_mm(&fpe->mmsv, cmd); + cmd->tx_min_frag_size = fpe->tx_min_frag_size; + cmd->rx_min_frag_size = IGC_RX_MIN_FRAG_SIZE; + + return 0; +} + +static int igc_ethtool_set_mm(struct net_device *netdev, + struct ethtool_mm_cfg *cmd, + struct netlink_ext_ack *extack) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_fpe_t *fpe = &adapter->fpe; + + fpe->tx_min_frag_size = igc_fpe_get_supported_frag_size(cmd->tx_min_frag_size); + if (fpe->tx_min_frag_size != cmd->tx_min_frag_size) + NL_SET_ERR_MSG_MOD(extack, + "tx-min-frag-size value set is unsupported. Rounded up to supported value (64, 128, 192, 256)"); + + if (fpe->mmsv.pmac_enabled != cmd->pmac_enabled) { + if (cmd->pmac_enabled) + static_branch_inc(&igc_fpe_enabled); + else + static_branch_dec(&igc_fpe_enabled); + } + + ethtool_mmsv_set_mm(&fpe->mmsv, cmd); + + return igc_tsn_offload_apply(adapter); +} + +/** + * igc_ethtool_get_frame_ass_error - Get the frame assembly error count. + * @reg_value: Register value for IGC_PRMEXCPRCNT + * Return: The count of frame assembly errors. + */ +static u64 igc_ethtool_get_frame_ass_error(u32 reg_value) +{ + /* Out of order statistics */ + u32 ooo_frame_cnt, ooo_frag_cnt; + u32 miss_frame_frag_cnt; + + ooo_frame_cnt = FIELD_GET(IGC_PRMEXCPRCNT_OOO_FRAME_CNT, reg_value); + ooo_frag_cnt = FIELD_GET(IGC_PRMEXCPRCNT_OOO_FRAG_CNT, reg_value); + miss_frame_frag_cnt = FIELD_GET(IGC_PRMEXCPRCNT_MISS_FRAME_FRAG_CNT, + reg_value); + + return ooo_frame_cnt + ooo_frag_cnt + miss_frame_frag_cnt; +} + +static u64 igc_ethtool_get_frame_smd_error(u32 reg_value) +{ + return FIELD_GET(IGC_PRMEXCPRCNT_OOO_SMDC, reg_value); +} + +static void igc_ethtool_get_mm_stats(struct net_device *dev, + struct ethtool_mm_stats *stats) +{ + struct igc_adapter *adapter = netdev_priv(dev); + struct igc_hw *hw = &adapter->hw; + u32 reg_value; + + reg_value = rd32(IGC_PRMEXCPRCNT); + + stats->MACMergeFrameAssErrorCount = igc_ethtool_get_frame_ass_error(reg_value); + stats->MACMergeFrameSmdErrorCount = igc_ethtool_get_frame_smd_error(reg_value); + stats->MACMergeFrameAssOkCount = rd32(IGC_PRMPTDRCNT); + stats->MACMergeFragCountRx = rd32(IGC_PRMEVNTRCNT); + stats->MACMergeFragCountTx = rd32(IGC_PRMEVNTTCNT); +} + static int igc_ethtool_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) { @@ -2076,6 +2154,9 @@ static const struct ethtool_ops igc_ethtool_ops = { .get_link_ksettings = igc_ethtool_get_link_ksettings, .set_link_ksettings = igc_ethtool_set_link_ksettings, .self_test = igc_ethtool_diag_test, + .get_mm = igc_ethtool_get_mm, + .get_mm_stats = igc_ethtool_get_mm_stats, + .set_mm = igc_ethtool_set_mm, }; void igc_ethtool_set_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index b1669d7cf435..27575a1e1777 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -2464,8 +2464,7 @@ unmap: return -ENOMEM; } -static struct igc_ring *igc_xdp_get_tx_ring(struct igc_adapter *adapter, - int cpu) +struct igc_ring *igc_get_tx_ring(struct igc_adapter *adapter, int cpu) { int index = cpu; @@ -2489,7 +2488,7 @@ static int igc_xdp_xmit_back(struct igc_adapter *adapter, struct xdp_buff *xdp) if (unlikely(!xdpf)) return -EFAULT; - ring = igc_xdp_get_tx_ring(adapter, cpu); + ring = igc_get_tx_ring(adapter, cpu); nq = txring_txq(ring); __netif_tx_lock(nq, cpu); @@ -2549,7 +2548,7 @@ out: } /* This function assumes __netif_tx_lock is held by the 
caller. */ -static void igc_flush_tx_descriptors(struct igc_ring *ring) +void igc_flush_tx_descriptors(struct igc_ring *ring) { /* Once tail pointer is updated, hardware can fetch the descriptors * any time so we issue a write membar here to ensure all memory @@ -2566,7 +2565,7 @@ static void igc_finalize_xdp(struct igc_adapter *adapter, int status) struct igc_ring *ring; if (status & IGC_XDP_TX) { - ring = igc_xdp_get_tx_ring(adapter, cpu); + ring = igc_get_tx_ring(adapter, cpu); nq = txring_txq(ring); __netif_tx_lock(nq, cpu); @@ -2638,6 +2637,14 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) size -= IGC_TS_HDR_LEN; } + if (igc_fpe_is_pmac_enabled(adapter) && + igc_fpe_handle_mpacket(adapter, rx_desc, size, pktbuf)) { + /* Advance the ring next-to-clean */ + igc_is_non_eop(rx_ring, rx_desc); + cleaned_count++; + continue; + } + if (!skb) { xdp_init_buff(&ctx.xdp, truesize, &rx_ring->xdp_rxq); xdp_prepare_buff(&ctx.xdp, pktbuf - igc_rx_offset(rx_ring), @@ -3145,6 +3152,11 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget) if (!(eop_desc->wb.status & cpu_to_le32(IGC_TXD_STAT_DD))) break; + if (igc_fpe_is_pmac_enabled(adapter) && + igc_fpe_transmitted_smd_v(tx_desc)) + ethtool_mmsv_event_handle(&adapter->fpe.mmsv, + ETHTOOL_MMSV_LD_SENT_VERIFY_MPACKET); + /* Hold the completions while there's a pending tx hardware * timestamp request from XDP Tx metadata. */ @@ -4037,6 +4049,30 @@ static int igc_uc_unsync(struct net_device *netdev, const unsigned char *addr) } /** + * igc_enable_empty_addr_recv - Enable Rx of packets with all-zeroes MAC address + * @adapter: Pointer to the igc_adapter structure. + * + * Frame preemption verification requires that packets with the all-zeroes + * MAC address are allowed to be received by the driver. This function adds the + * all-zeroes destination address to the list of acceptable addresses. + * + * Return: 0 on success, negative value otherwise. 
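+ *
+ * A minimal usage sketch, mirroring how igc_tsn_reset() in igc_tsn.c pairs
+ * this helper with igc_disable_empty_addr_recv() based on whether the
+ * preemptible MAC is enabled:
+ *
+ *	if (adapter->fpe.mmsv.pmac_enabled)
+ *		err = igc_enable_empty_addr_recv(adapter);
+ *	else
+ *		igc_disable_empty_addr_recv(adapter);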
+ */ +int igc_enable_empty_addr_recv(struct igc_adapter *adapter) +{ + u8 empty[ETH_ALEN] = {}; + + return igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, empty, -1); +} + +void igc_disable_empty_addr_recv(struct igc_adapter *adapter) +{ + u8 empty[ETH_ALEN] = {}; + + igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, empty); +} + +/** * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set * @netdev: network interface device structure * @@ -5311,6 +5347,9 @@ void igc_down(struct igc_adapter *adapter) igc_disable_all_tx_rings_hw(adapter); igc_clean_all_tx_rings(adapter); igc_clean_all_rx_rings(adapter); + + if (adapter->fpe.mmsv.pmac_enabled) + ethtool_mmsv_stop(&adapter->fpe.mmsv); } void igc_reinit_locked(struct igc_adapter *adapter) @@ -5835,6 +5874,10 @@ static void igc_watchdog_task(struct work_struct *work) */ igc_tsn_adjust_txtime_offset(adapter); + if (adapter->fpe.mmsv.pmac_enabled) + ethtool_mmsv_link_state_handle(&adapter->fpe.mmsv, + true); + if (adapter->link_speed != SPEED_1000) goto no_wait; @@ -5870,6 +5913,10 @@ no_wait: netdev_info(netdev, "NIC Link is Down\n"); netif_carrier_off(netdev); + if (adapter->fpe.mmsv.pmac_enabled) + ethtool_mmsv_link_state_handle(&adapter->fpe.mmsv, + false); + /* link state has changed, schedule phy info update */ if (!test_bit(__IGC_DOWN, &adapter->state)) mod_timer(&adapter->phy_info_timer, @@ -6439,6 +6486,10 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter, if (!validate_schedule(adapter, qopt)) return -EINVAL; + /* preemptible isn't supported yet */ + if (qopt->mqprio.preemptible_tcs) + return -EOPNOTSUPP; + igc_ptp_read(adapter, &now); if (igc_tsn_is_taprio_activated_by_user(adapter) && @@ -6679,13 +6730,14 @@ static int igc_tsn_enable_mqprio(struct igc_adapter *adapter, struct tc_mqprio_qopt_offload *mqprio) { struct igc_hw *hw = &adapter->hw; - int i; + int err, i; if (hw->mac.type != igc_i225) return -EOPNOTSUPP; if (!mqprio->qopt.num_tc) { adapter->strict_priority_enable = false; + netdev_reset_tc(adapter->netdev); goto apply; } @@ -6716,6 +6768,21 @@ static int igc_tsn_enable_mqprio(struct igc_adapter *adapter, igc_save_mqprio_params(adapter, mqprio->qopt.num_tc, mqprio->qopt.offset); + err = netdev_set_num_tc(adapter->netdev, adapter->num_tc); + if (err) + return err; + + for (i = 0; i < adapter->num_tc; i++) { + err = netdev_set_tc_queue(adapter->netdev, i, 1, + adapter->queue_per_tc[i]); + if (err) + return err; + } + + /* In case the card is configured with less than four queues. 
*/ + for (; i < IGC_MAX_TX_QUEUES; i++) + adapter->queue_per_tc[i] = i; + mqprio->qopt.hw = TC_MQPRIO_HW_OFFLOAD_TCS; apply: @@ -6779,7 +6846,7 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames, if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) return -EINVAL; - ring = igc_xdp_get_tx_ring(adapter, cpu); + ring = igc_get_tx_ring(adapter, cpu); nq = txring_txq(ring); __netif_tx_lock(nq, cpu); @@ -7125,6 +7192,9 @@ static int igc_probe(struct pci_dev *pdev, netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | NETDEV_XDP_ACT_XSK_ZEROCOPY; + /* enable HW vlan tag insertion/stripping by default */ + netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; + /* MTU range: 68 - 9216 */ netdev->min_mtu = ETH_MIN_MTU; netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE; @@ -7157,8 +7227,8 @@ static int igc_probe(struct pci_dev *pdev, } /* configure RXPBSIZE and TXPBSIZE */ - wr32(IGC_RXPBS, I225_RXPBSIZE_DEFAULT); - wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT); + wr32(IGC_RXPBS, IGC_RXPBSIZE_EXP_BMC_DEFAULT); + wr32(IGC_TXPBS, IGC_TXPBSIZE_DEFAULT); timer_setup(&adapter->watchdog_timer, igc_watchdog, 0); timer_setup(&adapter->phy_info_timer, igc_update_phy_info, 0); @@ -7190,6 +7260,8 @@ static int igc_probe(struct pci_dev *pdev, igc_tsn_clear_schedule(adapter); + igc_fpe_init(adapter); + /* reset the hardware with the new settings */ igc_reset(adapter); diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c index efc7b30e4211..f4f5c28615d3 100644 --- a/drivers/net/ethernet/intel/igc/igc_ptp.c +++ b/drivers/net/ethernet/intel/igc/igc_ptp.c @@ -257,13 +257,6 @@ static int igc_ptp_feature_enable_i225(struct ptp_clock_info *ptp, switch (rq->type) { case PTP_CLK_REQ_EXTTS: - /* Reject requests with unsupported flags */ - if (rq->extts.flags & ~(PTP_ENABLE_FEATURE | - PTP_RISING_EDGE | - PTP_FALLING_EDGE | - PTP_STRICT_FLAGS)) - return -EOPNOTSUPP; - /* Reject requests failing to enable both edges. 
*/ if ((rq->extts.flags & PTP_STRICT_FLAGS) && (rq->extts.flags & PTP_ENABLE_FEATURE) && @@ -300,10 +293,6 @@ static int igc_ptp_feature_enable_i225(struct ptp_clock_info *ptp, return 0; case PTP_CLK_REQ_PEROUT: - /* Reject requests with unsupported flags */ - if (rq->perout.flags) - return -EOPNOTSUPP; - if (on) { pin = ptp_find_pin(igc->ptp_clock, PTP_PF_PEROUT, rq->perout.index); @@ -1162,6 +1151,9 @@ void igc_ptp_init(struct igc_adapter *adapter) adapter->ptp_caps.pin_config = adapter->sdp_config; adapter->ptp_caps.n_ext_ts = IGC_N_EXTTS; adapter->ptp_caps.n_per_out = IGC_N_PEROUT; + adapter->ptp_caps.supported_extts_flags = PTP_RISING_EDGE | + PTP_FALLING_EDGE | + PTP_STRICT_FLAGS; adapter->ptp_caps.n_pins = IGC_N_SDP; adapter->ptp_caps.verify = igc_ptp_verify_pin; diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h index 12ddc5793651..f343c6bfc6be 100644 --- a/drivers/net/ethernet/intel/igc/igc_regs.h +++ b/drivers/net/ethernet/intel/igc/igc_regs.h @@ -222,6 +222,22 @@ #define IGC_FTQF(_n) (0x059E0 + (4 * (_n))) /* 5-tuple Queue Fltr */ +/* Time sync registers - preemption statistics */ +#define IGC_PRMPTDRCNT 0x04284 /* Good RX Preempted Packets */ +#define IGC_PRMEVNTTCNT 0x04298 /* TX Preemption event counter */ +#define IGC_PRMEVNTRCNT 0x0429C /* RX Preemption event counter */ + + /* Preemption Exception Counter */ + #define IGC_PRMEXCPRCNT 0x42A0 +/* Received out of order packets with SMD-C */ +#define IGC_PRMEXCPRCNT_OOO_SMDC 0x000000FF +/* Received out of order packets with SMD-C and wrong Frame CNT */ +#define IGC_PRMEXCPRCNT_OOO_FRAME_CNT 0x0000FF00 +/* Received out of order packets with SMD-C and wrong Frag CNT */ +#define IGC_PRMEXCPRCNT_OOO_FRAG_CNT 0x00FF0000 +/* Received packets with SMD-S and wrong Frag CNT and Frame CNT */ +#define IGC_PRMEXCPRCNT_MISS_FRAME_FRAG_CNT 0xFF000000 + /* Transmit Scheduling Registers */ #define IGC_TQAVCTRL 0x3570 #define IGC_TXQCTL(_n) (0x3344 + 0x4 * (_n)) diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.c b/drivers/net/ethernet/intel/igc/igc_tsn.c index 1e44374ca1ff..f22cc4d4f459 100644 --- a/drivers/net/ethernet/intel/igc/igc_tsn.c +++ b/drivers/net/ethernet/intel/igc/igc_tsn.c @@ -2,9 +2,143 @@ /* Copyright (c) 2019 Intel Corporation */ #include "igc.h" +#include "igc_base.h" #include "igc_hw.h" #include "igc_tsn.h" +#define MIN_MULTPLIER_TX_MIN_FRAG 0 +#define MAX_MULTPLIER_TX_MIN_FRAG 3 +/* Frag size is based on the Section 8.12.2 of the SW User Manual */ +#define TX_MIN_FRAG_SIZE 64 +#define TX_MAX_FRAG_SIZE (TX_MIN_FRAG_SIZE * \ + (MAX_MULTPLIER_TX_MIN_FRAG + 1)) + +DEFINE_STATIC_KEY_FALSE(igc_fpe_enabled); + +static int igc_fpe_init_smd_frame(struct igc_ring *ring, + struct igc_tx_buffer *buffer, + struct sk_buff *skb) +{ + dma_addr_t dma = dma_map_single(ring->dev, skb->data, skb->len, + DMA_TO_DEVICE); + + if (dma_mapping_error(ring->dev, dma)) { + netdev_err_once(ring->netdev, "Failed to map DMA for TX\n"); + return -ENOMEM; + } + + buffer->skb = skb; + buffer->protocol = 0; + buffer->bytecount = skb->len; + buffer->gso_segs = 1; + buffer->time_stamp = jiffies; + dma_unmap_len_set(buffer, len, skb->len); + dma_unmap_addr_set(buffer, dma, dma); + + return 0; +} + +static int igc_fpe_init_tx_descriptor(struct igc_ring *ring, + struct sk_buff *skb, + enum igc_txd_popts_type type) +{ + u32 cmd_type, olinfo_status = 0; + struct igc_tx_buffer *buffer; + union igc_adv_tx_desc *desc; + int err; + + if (!igc_desc_unused(ring)) + return -EBUSY; + + buffer = 
&ring->tx_buffer_info[ring->next_to_use]; + err = igc_fpe_init_smd_frame(ring, buffer, skb); + if (err) + return err; + + cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT | + IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD | + buffer->bytecount; + + olinfo_status |= FIELD_PREP(IGC_ADVTXD_PAYLEN_MASK, buffer->bytecount); + + switch (type) { + case SMD_V: + case SMD_R: + olinfo_status |= FIELD_PREP(IGC_TXD_POPTS_SMD_MASK, type); + break; + } + + desc = IGC_TX_DESC(ring, ring->next_to_use); + desc->read.cmd_type_len = cpu_to_le32(cmd_type); + desc->read.olinfo_status = cpu_to_le32(olinfo_status); + desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(buffer, dma)); + + netdev_tx_sent_queue(txring_txq(ring), skb->len); + + buffer->next_to_watch = desc; + ring->next_to_use = (ring->next_to_use + 1) % ring->count; + + return 0; +} + +static int igc_fpe_xmit_smd_frame(struct igc_adapter *adapter, + enum igc_txd_popts_type type) +{ + int cpu = smp_processor_id(); + struct netdev_queue *nq; + struct igc_ring *ring; + struct sk_buff *skb; + int err; + + ring = igc_get_tx_ring(adapter, cpu); + nq = txring_txq(ring); + + skb = alloc_skb(SMD_FRAME_SIZE, GFP_ATOMIC); + if (!skb) + return -ENOMEM; + + skb_put_zero(skb, SMD_FRAME_SIZE); + + __netif_tx_lock(nq, cpu); + + err = igc_fpe_init_tx_descriptor(ring, skb, type); + igc_flush_tx_descriptors(ring); + + __netif_tx_unlock(nq); + + return err; +} + +static void igc_fpe_send_mpacket(struct ethtool_mmsv *mmsv, + enum ethtool_mpacket type) +{ + struct igc_fpe_t *fpe = container_of(mmsv, struct igc_fpe_t, mmsv); + struct igc_adapter *adapter; + int err; + + adapter = container_of(fpe, struct igc_adapter, fpe); + + if (type == ETHTOOL_MPACKET_VERIFY) { + err = igc_fpe_xmit_smd_frame(adapter, SMD_V); + if (err && net_ratelimit()) + netdev_err(adapter->netdev, "Error sending SMD-V\n"); + } else if (type == ETHTOOL_MPACKET_RESPONSE) { + err = igc_fpe_xmit_smd_frame(adapter, SMD_R); + if (err && net_ratelimit()) + netdev_err(adapter->netdev, "Error sending SMD-R frame\n"); + } +} + +static const struct ethtool_mmsv_ops igc_mmsv_ops = { + .send_mpacket = igc_fpe_send_mpacket, +}; + +void igc_fpe_init(struct igc_adapter *adapter) +{ + adapter->fpe.tx_min_frag_size = TX_MIN_FRAG_SIZE; + ethtool_mmsv_init(&adapter->fpe.mmsv, adapter->netdev, &igc_mmsv_ops); +} + static bool is_any_launchtime(struct igc_adapter *adapter) { int i; @@ -37,17 +171,16 @@ static unsigned int igc_tsn_new_flags(struct igc_adapter *adapter) { unsigned int new_flags = adapter->flags & ~IGC_FLAG_TSN_ANY_ENABLED; - if (adapter->taprio_offload_enable) - new_flags |= IGC_FLAG_TSN_QBV_ENABLED; - if (is_any_launchtime(adapter)) + if (adapter->taprio_offload_enable || is_any_launchtime(adapter) || + adapter->strict_priority_enable) new_flags |= IGC_FLAG_TSN_QBV_ENABLED; if (is_cbs_enabled(adapter)) new_flags |= IGC_FLAG_TSN_QAV_ENABLED; - if (adapter->strict_priority_enable) - new_flags |= IGC_FLAG_TSN_LEGACY_ENABLED; + if (adapter->fpe.mmsv.pmac_enabled) + new_flags |= IGC_FLAG_TSN_PREEMPT_ENABLED; return new_flags; } @@ -125,6 +258,29 @@ static void igc_tsn_tx_arb(struct igc_adapter *adapter, u16 *queue_per_tc) wr32(IGC_TXARB, txarb); } +/** + * igc_tsn_set_rxpbsize - Set the receive packet buffer size + * @adapter: Pointer to the igc_adapter structure + * @rxpbs_exp_bmc_be: Value to set the receive packet buffer size, including + * express buffer, BMC buffer, and Best Effort buffer + * + * The IGC_RXPBS register value may include allocations for the Express buffer, + * BMC buffer, Best Effort buffer, 
and the timestamp descriptor buffer + * (IGC_RXPBS_CFG_TS_EN). + */ +static void igc_tsn_set_rxpbsize(struct igc_adapter *adapter, + u32 rxpbs_exp_bmc_be) +{ + struct igc_hw *hw = &adapter->hw; + u32 rxpbs = rd32(IGC_RXPBS); + + rxpbs &= ~(IGC_RXPBSIZE_EXP_MASK | IGC_BMC2OSPBSIZE_MASK | + IGC_RXPBSIZE_BE_MASK); + rxpbs |= rxpbs_exp_bmc_be; + + wr32(IGC_RXPBS, rxpbs); +} + /* Returns the TSN specific registers to their default values after * the adapter is reset. */ @@ -136,15 +292,18 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter) int i; wr32(IGC_GTXOFFSET, 0); - wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT); + wr32(IGC_TXPBS, IGC_TXPBSIZE_DEFAULT); wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_DEFAULT); + igc_tsn_set_rxpbsize(adapter, IGC_RXPBSIZE_EXP_BMC_DEFAULT); + if (igc_is_device_id_i226(hw)) igc_tsn_restore_retx_default(adapter); tqavctrl = rd32(IGC_TQAVCTRL); tqavctrl &= ~(IGC_TQAVCTRL_TRANSMIT_MODE_TSN | - IGC_TQAVCTRL_ENHANCED_QAV | IGC_TQAVCTRL_FUTSCDDIS); + IGC_TQAVCTRL_ENHANCED_QAV | IGC_TQAVCTRL_FUTSCDDIS | + IGC_TQAVCTRL_PREEMPT_ENA | IGC_TQAVCTRL_MIN_FRAG_MASK); wr32(IGC_TQAVCTRL, tqavctrl); @@ -157,16 +316,12 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter) wr32(IGC_QBVCYCLET_S, 0); wr32(IGC_QBVCYCLET, NSEC_PER_SEC); - /* Reset mqprio TC configuration. */ - netdev_reset_tc(adapter->netdev); - /* Restore the default Tx arbitration: Priority 0 has the highest * priority and is assigned to queue 0 and so on and so forth. */ igc_tsn_tx_arb(adapter, queue_per_tc); adapter->flags &= ~IGC_FLAG_TSN_QBV_ENABLED; - adapter->flags &= ~IGC_FLAG_TSN_LEGACY_ENABLED; return 0; } @@ -190,53 +345,51 @@ static void igc_tsn_set_retx_qbvfullthreshold(struct igc_adapter *adapter) wr32(IGC_RETX_CTL, retxctl); } +static u8 igc_fpe_get_frag_size_mult(const struct igc_fpe_t *fpe) +{ + u8 mult = (fpe->tx_min_frag_size / TX_MIN_FRAG_SIZE) - 1; + + return clamp_t(u8, mult, MIN_MULTPLIER_TX_MIN_FRAG, + MAX_MULTPLIER_TX_MIN_FRAG); +} + +u32 igc_fpe_get_supported_frag_size(u32 frag_size) +{ + const u32 supported_sizes[] = {64, 128, 192, 256}; + + /* Find the smallest supported size that is >= frag_size */ + for (int i = 0; i < ARRAY_SIZE(supported_sizes); i++) { + if (frag_size <= supported_sizes[i]) + return supported_sizes[i]; + } + + /* Should not happen */ + return TX_MAX_FRAG_SIZE; +} + static int igc_tsn_enable_offload(struct igc_adapter *adapter) { struct igc_hw *hw = &adapter->hw; u32 tqavctrl, baset_l, baset_h; u32 sec, nsec, cycle; ktime_t base_time, systim; + u32 frag_size_mult; int i; wr32(IGC_TSAUXC, 0); wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_TSN); wr32(IGC_TXPBS, IGC_TXPBSIZE_TSN); + igc_tsn_set_rxpbsize(adapter, IGC_RXPBSIZE_EXP_BMC_BE_TSN); + if (igc_is_device_id_i226(hw)) igc_tsn_set_retx_qbvfullthreshold(adapter); if (adapter->strict_priority_enable) { - int err; - - err = netdev_set_num_tc(adapter->netdev, adapter->num_tc); - if (err) - return err; - - for (i = 0; i < adapter->num_tc; i++) { - err = netdev_set_tc_queue(adapter->netdev, i, 1, - adapter->queue_per_tc[i]); - if (err) - return err; - } - - /* In case the card is configured with less than four queues. */ - for (; i < IGC_MAX_TX_QUEUES; i++) - adapter->queue_per_tc[i] = i; - /* Configure queue priorities according to the user provided * mapping. */ igc_tsn_tx_arb(adapter, adapter->queue_per_tc); - - /* Enable legacy TSN mode which will do strict priority without - * any other TSN features. 
- */ - tqavctrl = rd32(IGC_TQAVCTRL); - tqavctrl |= IGC_TQAVCTRL_TRANSMIT_MODE_TSN; - tqavctrl &= ~IGC_TQAVCTRL_ENHANCED_QAV; - wr32(IGC_TQAVCTRL, tqavctrl); - - return 0; } for (i = 0; i < adapter->num_tx_queues; i++) { @@ -361,10 +514,16 @@ skip_cbs: wr32(IGC_TXQCTL(i), txqctl); } - tqavctrl = rd32(IGC_TQAVCTRL) & ~IGC_TQAVCTRL_FUTSCDDIS; - + tqavctrl = rd32(IGC_TQAVCTRL) & ~(IGC_TQAVCTRL_FUTSCDDIS | + IGC_TQAVCTRL_PREEMPT_ENA | IGC_TQAVCTRL_MIN_FRAG_MASK); tqavctrl |= IGC_TQAVCTRL_TRANSMIT_MODE_TSN | IGC_TQAVCTRL_ENHANCED_QAV; + if (adapter->fpe.mmsv.pmac_enabled) + tqavctrl |= IGC_TQAVCTRL_PREEMPT_ENA; + + frag_size_mult = igc_fpe_get_frag_size_mult(&adapter->fpe); + tqavctrl |= FIELD_PREP(IGC_TQAVCTRL_MIN_FRAG_MASK, frag_size_mult); + adapter->qbv_count++; cycle = adapter->cycle_time; @@ -425,6 +584,14 @@ int igc_tsn_reset(struct igc_adapter *adapter) unsigned int new_flags; int err = 0; + if (adapter->fpe.mmsv.pmac_enabled) { + err = igc_enable_empty_addr_recv(adapter); + if (err && net_ratelimit()) + netdev_err(adapter->netdev, "Error adding empty address to MAC filter\n"); + } else { + igc_disable_empty_addr_recv(adapter); + } + new_flags = igc_tsn_new_flags(adapter); if (!(new_flags & IGC_FLAG_TSN_ANY_ENABLED)) diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.h b/drivers/net/ethernet/intel/igc/igc_tsn.h index 98ec845a86bf..c2a77229207b 100644 --- a/drivers/net/ethernet/intel/igc/igc_tsn.h +++ b/drivers/net/ethernet/intel/igc/igc_tsn.h @@ -4,9 +4,61 @@ #ifndef _IGC_TSN_H_ #define _IGC_TSN_H_ +#define IGC_RX_MIN_FRAG_SIZE 60 +#define SMD_FRAME_SIZE 60 + +enum igc_txd_popts_type { + SMD_V = 0x01, + SMD_R = 0x02, +}; + +DECLARE_STATIC_KEY_FALSE(igc_fpe_enabled); + +void igc_fpe_init(struct igc_adapter *adapter); +u32 igc_fpe_get_supported_frag_size(u32 frag_size); int igc_tsn_offload_apply(struct igc_adapter *adapter); int igc_tsn_reset(struct igc_adapter *adapter); void igc_tsn_adjust_txtime_offset(struct igc_adapter *adapter); bool igc_tsn_is_taprio_activated_by_user(struct igc_adapter *adapter); +static inline bool igc_fpe_is_pmac_enabled(struct igc_adapter *adapter) +{ + return static_branch_unlikely(&igc_fpe_enabled) && + adapter->fpe.mmsv.pmac_enabled; +} + +static inline bool igc_fpe_handle_mpacket(struct igc_adapter *adapter, + union igc_adv_rx_desc *rx_desc, + unsigned int size, void *pktbuf) +{ + u32 status_error = le32_to_cpu(rx_desc->wb.upper.status_error); + int smd; + + smd = FIELD_GET(IGC_RXDADV_STAT_SMD_TYPE_MASK, status_error); + if (smd != IGC_RXD_STAT_SMD_TYPE_V && smd != IGC_RXD_STAT_SMD_TYPE_R) + return false; + + if (size == SMD_FRAME_SIZE && mem_is_zero(pktbuf, SMD_FRAME_SIZE)) { + struct ethtool_mmsv *mmsv = &adapter->fpe.mmsv; + enum ethtool_mmsv_event event; + + if (smd == IGC_RXD_STAT_SMD_TYPE_V) + event = ETHTOOL_MMSV_LP_SENT_VERIFY_MPACKET; + else + event = ETHTOOL_MMSV_LP_SENT_RESPONSE_MPACKET; + + ethtool_mmsv_event_handle(mmsv, event); + } + + return true; +} + +static inline bool igc_fpe_transmitted_smd_v(union igc_adv_tx_desc *tx_desc) +{ + u32 olinfo_status = le32_to_cpu(tx_desc->read.olinfo_status); + u8 smd = FIELD_GET(IGC_TXD_POPTS_SMD_MASK, olinfo_status); + + return smd == SMD_V; +} + #endif /* _IGC_BASE_H */ diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile index b456d102655a..2e7738f41c58 100644 --- a/drivers/net/ethernet/intel/ixgbe/Makefile +++ b/drivers/net/ethernet/intel/ixgbe/Makefile @@ -4,12 +4,14 @@ # Makefile for the Intel(R) 10GbE PCI Express ethernet driver # +subdir-ccflags-y += 
-I$(src) obj-$(CONFIG_IXGBE) += ixgbe.o ixgbe-y := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \ ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \ ixgbe_mbx.o ixgbe_x540.o ixgbe_x550.o ixgbe_lib.o ixgbe_ptp.o \ - ixgbe_xsk.o ixgbe_e610.o + ixgbe_xsk.o ixgbe_e610.o devlink/devlink.o ixgbe_fw_update.o \ + devlink/region.o ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \ ixgbe_dcb_82599.o ixgbe_dcb_nl.o diff --git a/drivers/net/ethernet/intel/ixgbe/devlink/devlink.c b/drivers/net/ethernet/intel/ixgbe/devlink/devlink.c new file mode 100644 index 000000000000..54f1b83dfe42 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/devlink/devlink.c @@ -0,0 +1,557 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025, Intel Corporation. */ + +#include "ixgbe.h" +#include "devlink.h" +#include "ixgbe_fw_update.h" + +struct ixgbe_info_ctx { + char buf[128]; + struct ixgbe_orom_info pending_orom; + struct ixgbe_nvm_info pending_nvm; + struct ixgbe_netlist_info pending_netlist; + struct ixgbe_hw_dev_caps dev_caps; +}; + +enum ixgbe_devlink_version_type { + IXGBE_DL_VERSION_RUNNING, + IXGBE_DL_VERSION_STORED +}; + +static void ixgbe_info_get_dsn(struct ixgbe_adapter *adapter, + struct ixgbe_info_ctx *ctx) +{ + u8 dsn[8]; + + /* Copy the DSN into an array in Big Endian format */ + put_unaligned_be64(pci_get_dsn(adapter->pdev), dsn); + + snprintf(ctx->buf, sizeof(ctx->buf), "%8phD", dsn); +} + +static void ixgbe_info_orom_ver(struct ixgbe_adapter *adapter, + struct ixgbe_info_ctx *ctx, + enum ixgbe_devlink_version_type type) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_nvm_version nvm_ver; + + ctx->buf[0] = '\0'; + + if (hw->mac.type == ixgbe_mac_e610) { + struct ixgbe_orom_info *orom = &adapter->hw.flash.orom; + + if (type == IXGBE_DL_VERSION_STORED && + ctx->dev_caps.common_cap.nvm_update_pending_orom) + orom = &ctx->pending_orom; + + snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", + orom->major, orom->build, orom->patch); + return; + } + + ixgbe_get_oem_prod_version(hw, &nvm_ver); + if (nvm_ver.oem_valid) { + snprintf(ctx->buf, sizeof(ctx->buf), "%x.%x.%x", + nvm_ver.oem_major, nvm_ver.oem_minor, + nvm_ver.oem_release); + + return; + } + + ixgbe_get_orom_version(hw, &nvm_ver); + if (nvm_ver.or_valid) + snprintf(ctx->buf, sizeof(ctx->buf), "%d.%d.%d", + nvm_ver.or_major, nvm_ver.or_build, nvm_ver.or_patch); +} + +static void ixgbe_info_eetrack(struct ixgbe_adapter *adapter, + struct ixgbe_info_ctx *ctx, + enum ixgbe_devlink_version_type type) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_nvm_version nvm_ver; + + if (hw->mac.type == ixgbe_mac_e610) { + u32 eetrack = hw->flash.nvm.eetrack; + + if (type == IXGBE_DL_VERSION_STORED && + ctx->dev_caps.common_cap.nvm_update_pending_nvm) + eetrack = ctx->pending_nvm.eetrack; + + snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", eetrack); + return; + } + + ixgbe_get_oem_prod_version(hw, &nvm_ver); + + /* No ETRACK version for OEM */ + if (nvm_ver.oem_valid) { + ctx->buf[0] = '\0'; + return; + } + + ixgbe_get_etk_id(hw, &nvm_ver); + snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", nvm_ver.etk_id); +} + +static void ixgbe_info_fw_api(struct ixgbe_adapter *adapter, + struct ixgbe_info_ctx *ctx) +{ + struct ixgbe_hw *hw = &adapter->hw; + + snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", + hw->api_maj_ver, hw->api_min_ver, hw->api_patch); +} + +static void ixgbe_info_fw_build(struct ixgbe_adapter *adapter, + struct ixgbe_info_ctx *ctx) +{ + struct ixgbe_hw *hw = &adapter->hw; + + snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", 
hw->fw_build); +} + +static void ixgbe_info_fw_srev(struct ixgbe_adapter *adapter, + struct ixgbe_info_ctx *ctx, + enum ixgbe_devlink_version_type type) +{ + struct ixgbe_nvm_info *nvm = &adapter->hw.flash.nvm; + + if (type == IXGBE_DL_VERSION_STORED && + ctx->dev_caps.common_cap.nvm_update_pending_nvm) + nvm = &ctx->pending_nvm; + + snprintf(ctx->buf, sizeof(ctx->buf), "%u", nvm->srev); +} + +static void ixgbe_info_orom_srev(struct ixgbe_adapter *adapter, + struct ixgbe_info_ctx *ctx, + enum ixgbe_devlink_version_type type) +{ + struct ixgbe_orom_info *orom = &adapter->hw.flash.orom; + + if (type == IXGBE_DL_VERSION_STORED && + ctx->dev_caps.common_cap.nvm_update_pending_orom) + orom = &ctx->pending_orom; + + snprintf(ctx->buf, sizeof(ctx->buf), "%u", orom->srev); +} + +static void ixgbe_info_nvm_ver(struct ixgbe_adapter *adapter, + struct ixgbe_info_ctx *ctx, + enum ixgbe_devlink_version_type type) +{ + struct ixgbe_nvm_info *nvm = &adapter->hw.flash.nvm; + + if (type == IXGBE_DL_VERSION_STORED && + ctx->dev_caps.common_cap.nvm_update_pending_nvm) + nvm = &ctx->pending_nvm; + + snprintf(ctx->buf, sizeof(ctx->buf), "%x.%02x", nvm->major, nvm->minor); +} + +static void ixgbe_info_netlist_ver(struct ixgbe_adapter *adapter, + struct ixgbe_info_ctx *ctx, + enum ixgbe_devlink_version_type type) +{ + struct ixgbe_netlist_info *netlist = &adapter->hw.flash.netlist; + + if (type == IXGBE_DL_VERSION_STORED && + ctx->dev_caps.common_cap.nvm_update_pending_netlist) + netlist = &ctx->pending_netlist; + + /* The netlist version fields are BCD formatted */ + snprintf(ctx->buf, sizeof(ctx->buf), "%x.%x.%x-%x.%x.%x", + netlist->major, netlist->minor, + netlist->type >> 16, netlist->type & 0xFFFF, + netlist->rev, netlist->cust_ver); +} + +static void ixgbe_info_netlist_build(struct ixgbe_adapter *adapter, + struct ixgbe_info_ctx *ctx, + enum ixgbe_devlink_version_type type) +{ + struct ixgbe_netlist_info *netlist = &adapter->hw.flash.netlist; + + if (type == IXGBE_DL_VERSION_STORED && + ctx->dev_caps.common_cap.nvm_update_pending_netlist) + netlist = &ctx->pending_netlist; + + snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", netlist->hash); +} + +static int ixgbe_set_ctx_dev_caps(struct ixgbe_hw *hw, + struct ixgbe_info_ctx *ctx, + struct netlink_ext_ack *extack) +{ + bool *pending_orom, *pending_nvm, *pending_netlist; + int err; + + err = ixgbe_discover_dev_caps(hw, &ctx->dev_caps); + if (err) { + NL_SET_ERR_MSG_MOD(extack, + "Unable to discover device capabilities"); + return err; + } + + pending_orom = &ctx->dev_caps.common_cap.nvm_update_pending_orom; + pending_nvm = &ctx->dev_caps.common_cap.nvm_update_pending_nvm; + pending_netlist = &ctx->dev_caps.common_cap.nvm_update_pending_netlist; + + if (*pending_orom) { + err = ixgbe_get_inactive_orom_ver(hw, &ctx->pending_orom); + if (err) + *pending_orom = false; + } + + if (*pending_nvm) { + err = ixgbe_get_inactive_nvm_ver(hw, &ctx->pending_nvm); + if (err) + *pending_nvm = false; + } + + if (*pending_netlist) { + err = ixgbe_get_inactive_netlist_ver(hw, &ctx->pending_netlist); + if (err) + *pending_netlist = false; + } + + return 0; +} + +static int ixgbe_devlink_info_get_e610(struct ixgbe_adapter *adapter, + struct devlink_info_req *req, + struct ixgbe_info_ctx *ctx) +{ + int err; + + ixgbe_info_fw_api(adapter, ctx); + err = devlink_info_version_running_put(req, + DEVLINK_INFO_VERSION_GENERIC_FW_MGMT_API, + ctx->buf); + if (err) + return err; + + ixgbe_info_fw_build(adapter, ctx); + err = devlink_info_version_running_put(req, "fw.mgmt.build", ctx->buf); 
+ if (err) + return err; + + ixgbe_info_fw_srev(adapter, ctx, IXGBE_DL_VERSION_RUNNING); + err = devlink_info_version_running_put(req, "fw.mgmt.srev", ctx->buf); + if (err) + return err; + + ixgbe_info_orom_srev(adapter, ctx, IXGBE_DL_VERSION_RUNNING); + err = devlink_info_version_running_put(req, "fw.undi.srev", ctx->buf); + if (err) + return err; + + ixgbe_info_nvm_ver(adapter, ctx, IXGBE_DL_VERSION_RUNNING); + err = devlink_info_version_running_put(req, "fw.psid.api", ctx->buf); + if (err) + return err; + + ixgbe_info_netlist_ver(adapter, ctx, IXGBE_DL_VERSION_RUNNING); + err = devlink_info_version_running_put(req, "fw.netlist", ctx->buf); + if (err) + return err; + + ixgbe_info_netlist_build(adapter, ctx, IXGBE_DL_VERSION_RUNNING); + return devlink_info_version_running_put(req, "fw.netlist.build", + ctx->buf); +} + +static int +ixgbe_devlink_pending_info_get_e610(struct ixgbe_adapter *adapter, + struct devlink_info_req *req, + struct ixgbe_info_ctx *ctx) +{ + int err; + + ixgbe_info_orom_ver(adapter, ctx, IXGBE_DL_VERSION_STORED); + err = devlink_info_version_stored_put(req, + DEVLINK_INFO_VERSION_GENERIC_FW_UNDI, + ctx->buf); + if (err) + return err; + + ixgbe_info_eetrack(adapter, ctx, IXGBE_DL_VERSION_STORED); + err = devlink_info_version_stored_put(req, + DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID, + ctx->buf); + if (err) + return err; + + ixgbe_info_fw_srev(adapter, ctx, IXGBE_DL_VERSION_STORED); + err = devlink_info_version_stored_put(req, "fw.mgmt.srev", ctx->buf); + if (err) + return err; + + ixgbe_info_orom_srev(adapter, ctx, IXGBE_DL_VERSION_STORED); + err = devlink_info_version_stored_put(req, "fw.undi.srev", ctx->buf); + if (err) + return err; + + ixgbe_info_nvm_ver(adapter, ctx, IXGBE_DL_VERSION_STORED); + err = devlink_info_version_stored_put(req, "fw.psid.api", ctx->buf); + if (err) + return err; + + ixgbe_info_netlist_ver(adapter, ctx, IXGBE_DL_VERSION_STORED); + err = devlink_info_version_stored_put(req, "fw.netlist", ctx->buf); + if (err) + return err; + + ixgbe_info_netlist_build(adapter, ctx, IXGBE_DL_VERSION_STORED); + return devlink_info_version_stored_put(req, "fw.netlist.build", + ctx->buf); +} + +static int ixgbe_devlink_info_get(struct devlink *devlink, + struct devlink_info_req *req, + struct netlink_ext_ack *extack) +{ + struct ixgbe_adapter *adapter = devlink_priv(devlink); + struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_info_ctx *ctx; + int err; + + ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + if (hw->mac.type == ixgbe_mac_e610) + ixgbe_refresh_fw_version(adapter); + + ixgbe_info_get_dsn(adapter, ctx); + err = devlink_info_serial_number_put(req, ctx->buf); + if (err) + goto free_ctx; + + err = hw->eeprom.ops.read_pba_string(hw, ctx->buf, sizeof(ctx->buf)); + if (err) + goto free_ctx; + + err = devlink_info_version_fixed_put(req, + DEVLINK_INFO_VERSION_GENERIC_BOARD_ID, + ctx->buf); + if (err) + goto free_ctx; + + ixgbe_info_orom_ver(adapter, ctx, IXGBE_DL_VERSION_RUNNING); + err = devlink_info_version_running_put(req, + DEVLINK_INFO_VERSION_GENERIC_FW_UNDI, + ctx->buf); + if (err) + goto free_ctx; + + ixgbe_info_eetrack(adapter, ctx, IXGBE_DL_VERSION_RUNNING); + err = devlink_info_version_running_put(req, + DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID, + ctx->buf); + if (err || hw->mac.type != ixgbe_mac_e610) + goto free_ctx; + + err = ixgbe_set_ctx_dev_caps(hw, ctx, extack); + if (err) + goto free_ctx; + + err = ixgbe_devlink_info_get_e610(adapter, req, ctx); + if (err) + goto free_ctx; + + err = 
ixgbe_devlink_pending_info_get_e610(adapter, req, ctx); +free_ctx: + kfree(ctx); + return err; +} + +/** + * ixgbe_devlink_reload_empr_start - Start EMP reset to activate new firmware + * @devlink: pointer to the devlink instance to reload + * @netns_change: if true, the network namespace is changing + * @action: the action to perform. Must be DEVLINK_RELOAD_ACTION_FW_ACTIVATE + * @limit: limits on what reload should do, such as not resetting + * @extack: netlink extended ACK structure + * + * Allow user to activate new Embedded Management Processor firmware by + * issuing device specific EMP reset. Called in response to + * a DEVLINK_CMD_RELOAD with the DEVLINK_RELOAD_ACTION_FW_ACTIVATE. + * + * Note that teardown and rebuild of the driver state happens automatically as + * part of an interrupt and watchdog task. This is because all physical + * functions on the device must be able to reset when an EMP reset occurs from + * any source. + * + * Return: the exit code of the operation. + */ +static int ixgbe_devlink_reload_empr_start(struct devlink *devlink, + bool netns_change, + enum devlink_reload_action action, + enum devlink_reload_limit limit, + struct netlink_ext_ack *extack) +{ + struct ixgbe_adapter *adapter = devlink_priv(devlink); + struct ixgbe_hw *hw = &adapter->hw; + u8 pending; + int err; + + if (hw->mac.type != ixgbe_mac_e610) + return -EOPNOTSUPP; + + err = ixgbe_get_pending_updates(adapter, &pending, extack); + if (err) + return err; + + /* Pending is a bitmask of which flash banks have a pending update, + * including the main NVM bank, the Option ROM bank, and the netlist + * bank. If any of these bits are set, then there is a pending update + * waiting to be activated. + */ + if (!pending) { + NL_SET_ERR_MSG_MOD(extack, "No pending firmware update"); + return -ECANCELED; + } + + if (adapter->fw_emp_reset_disabled) { + NL_SET_ERR_MSG_MOD(extack, + "EMP reset is not available. To activate firmware, a reboot or power cycle is needed"); + return -ECANCELED; + } + + err = ixgbe_aci_nvm_update_empr(hw); + if (err) + NL_SET_ERR_MSG_MOD(extack, + "Failed to trigger EMP device reset to reload firmware"); + + return err; +} + +/*Wait for 10 sec with 0.5 sec tic. EMPR takes no less than half of a sec */ +#define IXGBE_DEVLINK_RELOAD_TIMEOUT_SEC 20 + +/** + * ixgbe_devlink_reload_empr_finish - finishes EMP reset + * @devlink: pointer to the devlink instance + * @action: the action to perform. + * @limit: limits on what reload should do + * @actions_performed: actions performed + * @extack: netlink extended ACK structure + * + * Wait for new NVM to be loaded during EMP reset. + * + * Return: -ETIME when timer is exceeded, 0 on success. + */ +static int ixgbe_devlink_reload_empr_finish(struct devlink *devlink, + enum devlink_reload_action action, + enum devlink_reload_limit limit, + u32 *actions_performed, + struct netlink_ext_ack *extack) +{ + struct ixgbe_adapter *adapter = devlink_priv(devlink); + struct ixgbe_hw *hw = &adapter->hw; + int i = 0; + u32 fwsm; + + do { + /* Just right away after triggering EMP reset the FWSM register + * may be not cleared yet, so begin the loop with the delay + * in order to not check the not updated register. 
+ */ + mdelay(500); + + fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw)); + + if (i++ >= IXGBE_DEVLINK_RELOAD_TIMEOUT_SEC) + return -ETIME; + + } while (!(fwsm & IXGBE_FWSM_FW_VAL_BIT)); + + *actions_performed = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE); + + adapter->flags2 &= ~(IXGBE_FLAG2_API_MISMATCH | + IXGBE_FLAG2_FW_ROLLBACK); + + return 0; +} + +static const struct devlink_ops ixgbe_devlink_ops = { + .info_get = ixgbe_devlink_info_get, + .supported_flash_update_params = + DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK, + .flash_update = ixgbe_flash_pldm_image, + .reload_actions = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE), + .reload_down = ixgbe_devlink_reload_empr_start, + .reload_up = ixgbe_devlink_reload_empr_finish, +}; + +/** + * ixgbe_allocate_devlink - Allocate devlink instance + * @dev: device to allocate devlink for + * + * Allocate a devlink instance for this physical function. + * + * Return: pointer to the device adapter structure on success, + * ERR_PTR(-ENOMEM) when allocation failed. + */ +struct ixgbe_adapter *ixgbe_allocate_devlink(struct device *dev) +{ + struct ixgbe_adapter *adapter; + struct devlink *devlink; + + devlink = devlink_alloc(&ixgbe_devlink_ops, sizeof(*adapter), dev); + if (!devlink) + return ERR_PTR(-ENOMEM); + + adapter = devlink_priv(devlink); + adapter->devlink = devlink; + + return adapter; +} + +/** + * ixgbe_devlink_set_switch_id - Set unique switch ID based on PCI DSN + * @adapter: pointer to the device adapter structure + * @ppid: struct with switch id information + */ +static void ixgbe_devlink_set_switch_id(struct ixgbe_adapter *adapter, + struct netdev_phys_item_id *ppid) +{ + u64 id = pci_get_dsn(adapter->pdev); + + ppid->id_len = sizeof(id); + put_unaligned_be64(id, &ppid->id); +} + +/** + * ixgbe_devlink_register_port - Register devlink port + * @adapter: pointer to the device adapter structure + * + * Create and register a devlink_port for this physical function. + * + * Return: 0 on success, error code on failure. + */ +int ixgbe_devlink_register_port(struct ixgbe_adapter *adapter) +{ + struct devlink_port *devlink_port = &adapter->devlink_port; + struct devlink *devlink = adapter->devlink; + struct device *dev = &adapter->pdev->dev; + struct devlink_port_attrs attrs = {}; + int err; + + attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; + attrs.phys.port_number = adapter->hw.bus.func; + ixgbe_devlink_set_switch_id(adapter, &attrs.switch_id); + + devlink_port_attrs_set(devlink_port, &attrs); + + err = devl_port_register(devlink, devlink_port, 0); + if (err) { + dev_err(dev, + "devlink port registration failed, err %d\n", err); + } + + return err; +} diff --git a/drivers/net/ethernet/intel/ixgbe/devlink/devlink.h b/drivers/net/ethernet/intel/ixgbe/devlink/devlink.h new file mode 100644 index 000000000000..381558058048 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/devlink/devlink.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2025, Intel Corporation. 
*/ + +#ifndef _IXGBE_DEVLINK_H_ +#define _IXGBE_DEVLINK_H_ + +struct ixgbe_adapter *ixgbe_allocate_devlink(struct device *dev); +int ixgbe_devlink_register_port(struct ixgbe_adapter *adapter); +void ixgbe_devlink_init_regions(struct ixgbe_adapter *adapter); +void ixgbe_devlink_destroy_regions(struct ixgbe_adapter *adapter); + +#endif /* _IXGBE_DEVLINK_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/devlink/region.c b/drivers/net/ethernet/intel/ixgbe/devlink/region.c new file mode 100644 index 000000000000..76f6571c3c34 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/devlink/region.c @@ -0,0 +1,290 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025, Intel Corporation. */ + +#include "ixgbe.h" +#include "devlink.h" + +#define IXGBE_DEVLINK_READ_BLK_SIZE (1024 * 1024) + +static const struct devlink_region_ops ixgbe_nvm_region_ops; +static const struct devlink_region_ops ixgbe_sram_region_ops; + +static int ixgbe_devlink_parse_region(struct ixgbe_hw *hw, + const struct devlink_region_ops *ops, + bool *read_shadow_ram, u32 *nvm_size) +{ + if (ops == &ixgbe_nvm_region_ops) { + *read_shadow_ram = false; + *nvm_size = hw->flash.flash_size; + } else if (ops == &ixgbe_sram_region_ops) { + *read_shadow_ram = true; + *nvm_size = hw->flash.sr_words * 2u; + } else { + return -EOPNOTSUPP; + } + + return 0; +} + +/** + * ixgbe_devlink_nvm_snapshot - Capture a snapshot of the NVM content + * @devlink: the devlink instance + * @ops: the devlink region being snapshotted + * @extack: extended ACK response structure + * @data: on exit points to snapshot data buffer + * + * This function is called in response to the DEVLINK_CMD_REGION_NEW cmd. + * + * Capture a snapshot of the whole requested NVM region. + * + * No need to worry with freeing @data, devlink core takes care if it. + * + * Return: 0 on success, -EOPNOTSUPP for unsupported regions, -EBUSY when + * cannot lock NVM, -ENOMEM when cannot alloc mem and -EIO when error + * occurs during reading. + */ +static int ixgbe_devlink_nvm_snapshot(struct devlink *devlink, + const struct devlink_region_ops *ops, + struct netlink_ext_ack *extack, u8 **data) +{ + struct ixgbe_adapter *adapter = devlink_priv(devlink); + struct ixgbe_hw *hw = &adapter->hw; + bool read_shadow_ram; + u8 *nvm_data, *buf; + u32 nvm_size, left; + u8 num_blks; + int err; + + err = ixgbe_devlink_parse_region(hw, ops, &read_shadow_ram, &nvm_size); + if (err) + return err; + + nvm_data = kvzalloc(nvm_size, GFP_KERNEL); + if (!nvm_data) + return -ENOMEM; + + num_blks = DIV_ROUND_UP(nvm_size, IXGBE_DEVLINK_READ_BLK_SIZE); + buf = nvm_data; + left = nvm_size; + + for (int i = 0; i < num_blks; i++) { + u32 read_sz = min_t(u32, IXGBE_DEVLINK_READ_BLK_SIZE, left); + + /* Need to acquire NVM lock during each loop run because the + * total period of reading whole NVM is longer than the maximum + * period the lock can be taken defined by the IXGBE_NVM_TIMEOUT. 
+ */ + err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ); + if (err) { + NL_SET_ERR_MSG_MOD(extack, + "Failed to acquire NVM semaphore"); + kvfree(nvm_data); + return -EBUSY; + } + + err = ixgbe_read_flat_nvm(hw, i * IXGBE_DEVLINK_READ_BLK_SIZE, + &read_sz, buf, read_shadow_ram); + if (err) { + NL_SET_ERR_MSG_MOD(extack, + "Failed to read RAM content"); + ixgbe_release_nvm(hw); + kvfree(nvm_data); + return -EIO; + } + + ixgbe_release_nvm(hw); + + buf += read_sz; + left -= read_sz; + } + + *data = nvm_data; + return 0; +} + +/** + * ixgbe_devlink_devcaps_snapshot - Capture a snapshot of device capabilities + * @devlink: the devlink instance + * @ops: the devlink region being snapshotted + * @extack: extended ACK response structure + * @data: on exit points to snapshot data buffer + * + * This function is called in response to the DEVLINK_CMD_REGION_NEW for + * the device-caps devlink region. + * + * Capture a snapshot of the device capabilities reported by firmware. + * + * No need to worry with freeing @data, devlink core takes care if it. + * + * Return: 0 on success, -ENOMEM when cannot alloc mem, or return code of + * the reading operation. + */ +static int ixgbe_devlink_devcaps_snapshot(struct devlink *devlink, + const struct devlink_region_ops *ops, + struct netlink_ext_ack *extack, + u8 **data) +{ + struct ixgbe_adapter *adapter = devlink_priv(devlink); + struct ixgbe_aci_cmd_list_caps_elem *caps; + struct ixgbe_hw *hw = &adapter->hw; + int err; + + caps = kvzalloc(IXGBE_ACI_MAX_BUFFER_SIZE, GFP_KERNEL); + if (!caps) + return -ENOMEM; + + err = ixgbe_aci_list_caps(hw, caps, IXGBE_ACI_MAX_BUFFER_SIZE, NULL, + ixgbe_aci_opc_list_dev_caps); + if (err) { + NL_SET_ERR_MSG_MOD(extack, + "Failed to read device capabilities"); + kvfree(caps); + return err; + } + + *data = (u8 *)caps; + return 0; +} + +/** + * ixgbe_devlink_nvm_read - Read a portion of NVM flash content + * @devlink: the devlink instance + * @ops: the devlink region to snapshot + * @extack: extended ACK response structure + * @offset: the offset to start at + * @size: the amount to read + * @data: the data buffer to read into + * + * This function is called in response to DEVLINK_CMD_REGION_READ to directly + * read a section of the NVM contents. + * + * Read from either the nvm-flash region either shadow-ram region. + * + * Return: 0 on success, -EOPNOTSUPP for unsupported regions, -EBUSY when + * cannot lock NVM, -ERANGE when buffer limit exceeded and -EIO when error + * occurs during reading. 
+ */ +static int ixgbe_devlink_nvm_read(struct devlink *devlink, + const struct devlink_region_ops *ops, + struct netlink_ext_ack *extack, + u64 offset, u32 size, u8 *data) +{ + struct ixgbe_adapter *adapter = devlink_priv(devlink); + struct ixgbe_hw *hw = &adapter->hw; + bool read_shadow_ram; + u32 nvm_size; + int err; + + err = ixgbe_devlink_parse_region(hw, ops, &read_shadow_ram, &nvm_size); + if (err) + return err; + + if (offset + size > nvm_size) { + NL_SET_ERR_MSG_MOD(extack, "Cannot read beyond the region size"); + return -ERANGE; + } + + err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore"); + return -EBUSY; + } + + err = ixgbe_read_flat_nvm(hw, (u32)offset, &size, data, read_shadow_ram); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to read NVM contents"); + ixgbe_release_nvm(hw); + return -EIO; + } + + ixgbe_release_nvm(hw); + return 0; +} + +static const struct devlink_region_ops ixgbe_nvm_region_ops = { + .name = "nvm-flash", + .destructor = kvfree, + .snapshot = ixgbe_devlink_nvm_snapshot, + .read = ixgbe_devlink_nvm_read, +}; + +static const struct devlink_region_ops ixgbe_sram_region_ops = { + .name = "shadow-ram", + .destructor = kvfree, + .snapshot = ixgbe_devlink_nvm_snapshot, + .read = ixgbe_devlink_nvm_read, +}; + +static const struct devlink_region_ops ixgbe_devcaps_region_ops = { + .name = "device-caps", + .destructor = kvfree, + .snapshot = ixgbe_devlink_devcaps_snapshot, +}; + +/** + * ixgbe_devlink_init_regions - Initialize devlink regions + * @adapter: adapter instance + * + * Create devlink regions used to enable access to dump the contents of the + * flash memory of the device. + */ +void ixgbe_devlink_init_regions(struct ixgbe_adapter *adapter) +{ + struct devlink *devlink = adapter->devlink; + struct device *dev = &adapter->pdev->dev; + u64 nvm_size, sram_size; + + if (adapter->hw.mac.type != ixgbe_mac_e610) + return; + + nvm_size = adapter->hw.flash.flash_size; + adapter->nvm_region = devl_region_create(devlink, &ixgbe_nvm_region_ops, + 1, nvm_size); + if (IS_ERR(adapter->nvm_region)) { + dev_err(dev, + "Failed to create NVM devlink region, err %ld\n", + PTR_ERR(adapter->nvm_region)); + adapter->nvm_region = NULL; + } + + sram_size = adapter->hw.flash.sr_words * 2u; + adapter->sram_region = devl_region_create(devlink, &ixgbe_sram_region_ops, + 1, sram_size); + if (IS_ERR(adapter->sram_region)) { + dev_err(dev, + "Failed to create shadow-ram devlink region, err %ld\n", + PTR_ERR(adapter->sram_region)); + adapter->sram_region = NULL; + } + + adapter->devcaps_region = devl_region_create(devlink, + &ixgbe_devcaps_region_ops, + 10, IXGBE_ACI_MAX_BUFFER_SIZE); + if (IS_ERR(adapter->devcaps_region)) { + dev_err(dev, + "Failed to create device-caps devlink region, err %ld\n", + PTR_ERR(adapter->devcaps_region)); + adapter->devcaps_region = NULL; + } +} + +/** + * ixgbe_devlink_destroy_regions - Destroy devlink regions + * @adapter: adapter instance + * + * Remove previously created regions for this adapter instance. 
+ */ +void ixgbe_devlink_destroy_regions(struct ixgbe_adapter *adapter) +{ + if (adapter->hw.mac.type != ixgbe_mac_e610) + return; + + if (adapter->nvm_region) + devl_region_destroy(adapter->nvm_region); + + if (adapter->sram_region) + devl_region_destroy(adapter->sram_region); + + if (adapter->devcaps_region) + devl_region_destroy(adapter->devcaps_region); +} diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index e6a380d4929b..47311b134a7a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -17,6 +17,8 @@ #include <linux/net_tstamp.h> #include <linux/ptp_clock_kernel.h> +#include <net/devlink.h> + #include "ixgbe_type.h" #include "ixgbe_common.h" #include "ixgbe_dcb.h" @@ -612,6 +614,11 @@ struct ixgbe_adapter { struct bpf_prog *xdp_prog; struct pci_dev *pdev; struct mii_bus *mii_bus; + struct devlink *devlink; + struct devlink_port devlink_port; + struct devlink_region *nvm_region; + struct devlink_region *sram_region; + struct devlink_region *devcaps_region; unsigned long state; @@ -667,6 +674,8 @@ struct ixgbe_adapter { #define IXGBE_FLAG2_PHY_FW_LOAD_FAILED BIT(20) #define IXGBE_FLAG2_NO_MEDIA BIT(21) #define IXGBE_FLAG2_MOD_POWER_UNSUPPORTED BIT(22) +#define IXGBE_FLAG2_API_MISMATCH BIT(23) +#define IXGBE_FLAG2_FW_ROLLBACK BIT(24) /* Tx fast path data */ int num_tx_queues; @@ -755,6 +764,8 @@ struct ixgbe_adapter { u32 atr_sample_rate; spinlock_t fdir_perfect_lock; + bool fw_emp_reset_disabled; + #ifdef IXGBE_FCOE struct ixgbe_fcoe fcoe; #endif /* IXGBE_FCOE */ @@ -830,6 +841,17 @@ struct ixgbe_adapter { spinlock_t vfs_lock; }; +struct ixgbe_netdevice_priv { + struct ixgbe_adapter *adapter; +}; + +static inline struct ixgbe_adapter *ixgbe_from_netdev(struct net_device *netdev) +{ + struct ixgbe_netdevice_priv *priv = netdev_priv(netdev); + + return priv->adapter; +} + static inline int ixgbe_determine_xdp_q_idx(int cpu) { if (static_key_enabled(&ixgbe_xdp_locking_key)) @@ -945,6 +967,8 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter); int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter); bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, u16 subdevice_id); +void ixgbe_set_fw_version_e610(struct ixgbe_adapter *adapter); +void ixgbe_refresh_fw_version(struct ixgbe_adapter *adapter); #ifdef CONFIG_PCI_IOV void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter); #endif diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c index 4aaaea3b5f8f..444da982593f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c @@ -1169,6 +1169,7 @@ static const struct ixgbe_eeprom_operations eeprom_ops_82598 = { .calc_checksum = &ixgbe_calc_eeprom_checksum_generic, .validate_checksum = &ixgbe_validate_eeprom_checksum_generic, .update_checksum = &ixgbe_update_eeprom_checksum_generic, + .read_pba_string = &ixgbe_read_pba_string_generic, }; static const struct ixgbe_phy_operations phy_ops_82598 = { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index 964988b4d58b..d5b1b974b4a3 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -2230,6 +2230,7 @@ static const struct ixgbe_eeprom_operations eeprom_ops_82599 = { .calc_checksum = &ixgbe_calc_eeprom_checksum_generic, .validate_checksum = &ixgbe_validate_eeprom_checksum_generic, 
.update_checksum = &ixgbe_update_eeprom_checksum_generic, + .read_pba_string = &ixgbe_read_pba_string_generic, }; static const struct ixgbe_phy_operations phy_ops_82599 = { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 7beaf6ea57f9..5784d5d1896e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -332,6 +332,7 @@ int ixgbe_start_hw_generic(struct ixgbe_hw *hw) * Devices in the second generation: * 82599 * X540 + * E610 **/ int ixgbe_start_hw_gen2(struct ixgbe_hw *hw) { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c index 19d6b6fa8fb3..3dd5a16a14df 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c @@ -118,14 +118,14 @@ static int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max) static u8 ixgbe_dcbnl_get_state(struct net_device *netdev) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); return !!(adapter->flags & IXGBE_FLAG_DCB_ENABLED); } static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); /* Fail command if not in CEE mode */ if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) @@ -142,7 +142,7 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state) static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev, u8 *perm_addr) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); int i, j; memset(perm_addr, 0xff, MAX_ADDR_LEN); @@ -167,7 +167,7 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc, u8 prio, u8 bwg_id, u8 bw_pct, u8 up_map) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); if (prio != DCB_ATTR_VALUE_UNDEFINED) adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type = prio; @@ -184,7 +184,7 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc, static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id, u8 bw_pct) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] = bw_pct; } @@ -193,7 +193,7 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc, u8 prio, u8 bwg_id, u8 bw_pct, u8 up_map) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); if (prio != DCB_ATTR_VALUE_UNDEFINED) adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type = prio; @@ -210,7 +210,7 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc, static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id, u8 bw_pct) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] = bw_pct; } @@ -219,7 +219,7 @@ static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc, u8 *prio, u8 *bwg_id, u8 *bw_pct, u8 *up_map) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); *prio = 
adapter->dcb_cfg.tc_config[tc].path[0].prio_type; *bwg_id = adapter->dcb_cfg.tc_config[tc].path[0].bwg_id; @@ -230,7 +230,7 @@ static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc, static void ixgbe_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id, u8 *bw_pct) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); *bw_pct = adapter->dcb_cfg.bw_percentage[0][bwg_id]; } @@ -239,7 +239,7 @@ static void ixgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc, u8 *prio, u8 *bwg_id, u8 *bw_pct, u8 *up_map) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); *prio = adapter->dcb_cfg.tc_config[tc].path[1].prio_type; *bwg_id = adapter->dcb_cfg.tc_config[tc].path[1].bwg_id; @@ -250,7 +250,7 @@ static void ixgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc, static void ixgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id, u8 *bw_pct) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); *bw_pct = adapter->dcb_cfg.bw_percentage[1][bwg_id]; } @@ -258,7 +258,7 @@ static void ixgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id, static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority, u8 setting) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc = setting; if (adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc != @@ -269,14 +269,14 @@ static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority, static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority, u8 *setting) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); *setting = adapter->dcb_cfg.tc_config[priority].dcb_pfc; } static void ixgbe_dcbnl_devreset(struct net_device *dev) { - struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev); while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) usleep_range(1000, 2000); @@ -295,7 +295,7 @@ static void ixgbe_dcbnl_devreset(struct net_device *dev) static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg; struct ixgbe_hw *hw = &adapter->hw; int ret = DCB_NO_HW_CHG; @@ -383,7 +383,7 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); switch (capid) { case DCB_CAP_ATTR_PG: @@ -420,7 +420,7 @@ static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap) static int ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { switch (tcid) { @@ -447,14 +447,14 @@ static int ixgbe_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num) static u8 ixgbe_dcbnl_getpfcstate(struct net_device *netdev) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = 
ixgbe_from_netdev(netdev); return adapter->dcb_cfg.pfc_mode_enable; } static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); adapter->temp_dcb_cfg.pfc_mode_enable = state; } @@ -471,7 +471,7 @@ static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state) */ static int ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); struct dcb_app app = { .selector = idtype, .protocol = id, @@ -486,7 +486,7 @@ static int ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id) static int ixgbe_dcbnl_ieee_getets(struct net_device *dev, struct ieee_ets *ets) { - struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev); struct ieee_ets *my_ets = adapter->ixgbe_ieee_ets; ets->ets_cap = adapter->dcb_cfg.num_tcs.pg_tcs; @@ -506,7 +506,7 @@ static int ixgbe_dcbnl_ieee_getets(struct net_device *dev, static int ixgbe_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets) { - struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev); int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN; int i, err; __u8 max_tc = 0; @@ -559,7 +559,7 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev, static int ixgbe_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc) { - struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev); struct ieee_pfc *my_pfc = adapter->ixgbe_ieee_pfc; int i; @@ -584,7 +584,7 @@ static int ixgbe_dcbnl_ieee_getpfc(struct net_device *dev, static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc) { - struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev); struct ixgbe_hw *hw = &adapter->hw; u8 *prio_tc; int err; @@ -616,7 +616,7 @@ static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev, static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app) { - struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev); int err; if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) @@ -661,7 +661,7 @@ static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev, static int ixgbe_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app) { - struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev); int err; if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) @@ -705,13 +705,13 @@ static int ixgbe_dcbnl_ieee_delapp(struct net_device *dev, static u8 ixgbe_dcbnl_getdcbx(struct net_device *dev) { - struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev); return adapter->dcbx_cap; } static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode) { - struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev); struct ieee_ets ets = {0}; struct ieee_pfc pfc = {0}; int err = 0; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c index 00935747c8c5..71ea25de1bac 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c @@ -344,6 +344,40 @@ void ixgbe_fill_dflt_direct_cmd_desc(struct 
ixgbe_aci_desc *desc, u16 opcode) } /** + * ixgbe_aci_get_fw_ver - Get the firmware version + * @hw: pointer to the HW struct + * + * Get the firmware version using ACI command (0x0001). + * + * Return: the exit code of the operation. + */ +static int ixgbe_aci_get_fw_ver(struct ixgbe_hw *hw) +{ + struct ixgbe_aci_cmd_get_ver *resp; + struct ixgbe_aci_desc desc; + int err; + + resp = &desc.params.get_ver; + + ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_ver); + + err = ixgbe_aci_send_cmd(hw, &desc, NULL, 0); + if (!err) { + hw->fw_branch = resp->fw_branch; + hw->fw_maj_ver = resp->fw_major; + hw->fw_min_ver = resp->fw_minor; + hw->fw_patch = resp->fw_patch; + hw->fw_build = le32_to_cpu(resp->fw_build); + hw->api_branch = resp->api_branch; + hw->api_maj_ver = resp->api_major; + hw->api_min_ver = resp->api_minor; + hw->api_patch = resp->api_patch; + } + + return err; +} + +/** * ixgbe_aci_req_res - request a common resource * @hw: pointer to the HW struct * @res: resource ID @@ -554,6 +588,20 @@ static bool ixgbe_parse_e610_caps(struct ixgbe_hw *hw, break; case IXGBE_ACI_CAPS_NVM_VER: break; + case IXGBE_ACI_CAPS_PENDING_NVM_VER: + caps->nvm_update_pending_nvm = true; + break; + case IXGBE_ACI_CAPS_PENDING_OROM_VER: + caps->nvm_update_pending_orom = true; + break; + case IXGBE_ACI_CAPS_PENDING_NET_VER: + caps->nvm_update_pending_netlist = true; + break; + case IXGBE_ACI_CAPS_NVM_MGMT: + caps->nvm_unified_update = + (number & IXGBE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ? + true : false; + break; case IXGBE_ACI_CAPS_MAX_MTU: caps->max_mtu = number; break; @@ -1411,6 +1459,61 @@ int ixgbe_configure_lse(struct ixgbe_hw *hw, bool activate, u16 mask) } /** + * ixgbe_start_hw_e610 - Prepare hardware for Tx/Rx + * @hw: pointer to hardware structure + * + * Get firmware version and start the hardware using the generic + * start_hw() and ixgbe_start_hw_gen2() functions. + * + * Return: the exit code of the operation. + */ +static int ixgbe_start_hw_e610(struct ixgbe_hw *hw) +{ + int err; + + err = ixgbe_aci_get_fw_ver(hw); + if (err) + return err; + + err = ixgbe_start_hw_generic(hw); + if (err) + return err; + + ixgbe_start_hw_gen2(hw); + + return 0; +} + +/** + * ixgbe_aci_set_port_id_led - set LED value for the given port + * @hw: pointer to the HW struct + * @orig_mode: set LED original mode + * + * Set LED value for the given port (0x06E9) + * + * Return: the exit code of the operation. + */ +int ixgbe_aci_set_port_id_led(struct ixgbe_hw *hw, bool orig_mode) +{ + struct ixgbe_aci_cmd_set_port_id_led *cmd; + struct ixgbe_aci_desc desc; + + cmd = &desc.params.set_port_id_led; + + ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_port_id_led); + + cmd->lport_num = (u8)hw->bus.func; + cmd->lport_num_valid = IXGBE_ACI_PORT_ID_PORT_NUM_VALID; + + if (orig_mode) + cmd->ident_mode = IXGBE_ACI_PORT_IDENT_LED_ORIG; + else + cmd->ident_mode = IXGBE_ACI_PORT_IDENT_LED_BLINK; + + return ixgbe_aci_send_cmd(hw, &desc, NULL, 0); +} + +/** * ixgbe_get_media_type_e610 - Gets media type * @hw: pointer to the HW struct * @@ -1743,6 +1846,38 @@ void ixgbe_disable_rx_e610(struct ixgbe_hw *hw) } /** + * ixgbe_fw_recovery_mode_e610 - Check FW NVM recovery mode + * @hw: pointer to hardware structure + * + * Check FW NVM recovery mode by reading the value of + * the dedicated register. + * + * Return: true if FW is in recovery mode, otherwise false. 
+ */ +static bool ixgbe_fw_recovery_mode_e610(struct ixgbe_hw *hw) +{ + u32 fwsm = IXGBE_READ_REG(hw, IXGBE_GL_MNG_FWSM); + + return !!(fwsm & IXGBE_GL_MNG_FWSM_RECOVERY_M); +} + +/** + * ixgbe_fw_rollback_mode_e610 - Check FW NVM rollback mode + * @hw: pointer to hardware structure + * + * Check FW NVM rollback mode by reading the value of + * the dedicated register. + * + * Return: true if FW is in rollback mode, otherwise false. + */ +static bool ixgbe_fw_rollback_mode_e610(struct ixgbe_hw *hw) +{ + u32 fwsm = IXGBE_READ_REG(hw, IXGBE_GL_MNG_FWSM); + + return !!(fwsm & IXGBE_GL_MNG_FWSM_ROLLBACK_M); +} + +/** * ixgbe_init_phy_ops_e610 - PHY specific init * @hw: pointer to hardware structure * @@ -2226,6 +2361,131 @@ int ixgbe_aci_read_nvm(struct ixgbe_hw *hw, u16 module_typeid, u32 offset, } /** + * ixgbe_aci_erase_nvm - erase NVM sector + * @hw: pointer to the HW struct + * @module_typeid: module pointer location in words from the NVM beginning + * + * Erase the NVM sector using the ACI command (0x0702). + * + * Return: the exit code of the operation. + */ +int ixgbe_aci_erase_nvm(struct ixgbe_hw *hw, u16 module_typeid) +{ + struct ixgbe_aci_cmd_nvm *cmd; + struct ixgbe_aci_desc desc; + __le16 len; + int err; + + /* Read a length value from SR, so module_typeid is equal to 0, + * calculate offset where module size is placed from bytes to words + * set last command and read from SR values to true. + */ + err = ixgbe_aci_read_nvm(hw, 0, 2 * module_typeid + 2, 2, &len, true, + true); + if (err) + return err; + + cmd = &desc.params.nvm; + + ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_erase); + + cmd->module_typeid = cpu_to_le16(module_typeid); + cmd->length = len; + cmd->offset_low = 0; + cmd->offset_high = 0; + + return ixgbe_aci_send_cmd(hw, &desc, NULL, 0); +} + +/** + * ixgbe_aci_update_nvm - update NVM + * @hw: pointer to the HW struct + * @module_typeid: module pointer location in words from the NVM beginning + * @offset: byte offset from the module beginning + * @length: length of the section to be written (in bytes from the offset) + * @data: command buffer (size [bytes] = length) + * @last_command: tells if this is the last command in a series + * @command_flags: command parameters + * + * Update the NVM using the ACI command (0x0703). + * + * Return: the exit code of the operation. + */ +int ixgbe_aci_update_nvm(struct ixgbe_hw *hw, u16 module_typeid, + u32 offset, u16 length, void *data, + bool last_command, u8 command_flags) +{ + struct ixgbe_aci_cmd_nvm *cmd; + struct ixgbe_aci_desc desc; + + cmd = &desc.params.nvm; + + /* In offset the highest byte must be zeroed. */ + if (offset & 0xFF000000) + return -EINVAL; + + ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_write); + + cmd->cmd_flags |= command_flags; + + /* If this is the last command in a series, set the proper flag. 
*/ + if (last_command) + cmd->cmd_flags |= IXGBE_ACI_NVM_LAST_CMD; + cmd->module_typeid = cpu_to_le16(module_typeid); + cmd->offset_low = cpu_to_le16(offset & 0xFFFF); + cmd->offset_high = FIELD_GET(IXGBE_ACI_NVM_OFFSET_HI_U_MASK, offset); + cmd->length = cpu_to_le16(length); + + desc.flags |= cpu_to_le16(IXGBE_ACI_FLAG_RD); + + return ixgbe_aci_send_cmd(hw, &desc, data, length); +} + +/** + * ixgbe_nvm_write_activate - NVM activate write + * @hw: pointer to the HW struct + * @cmd_flags: flags for write activate command + * @response_flags: response indicators from firmware + * + * Update the control word with the required banks' validity bits + * and dumps the Shadow RAM to flash using ACI command (0x0707). + * + * cmd_flags controls which banks to activate, the preservation level to use + * when activating the NVM bank, and whether an EMP reset is required for + * activation. + * + * Note that the 16bit cmd_flags value is split between two separate 1 byte + * flag values in the descriptor. + * + * On successful return of the firmware command, the response_flags variable + * is updated with the flags reported by firmware indicating certain status, + * such as whether EMP reset is enabled. + * + * Return: the exit code of the operation. + */ +int ixgbe_nvm_write_activate(struct ixgbe_hw *hw, u16 cmd_flags, + u8 *response_flags) +{ + struct ixgbe_aci_cmd_nvm *cmd; + struct ixgbe_aci_desc desc; + s32 err; + + cmd = &desc.params.nvm; + ixgbe_fill_dflt_direct_cmd_desc(&desc, + ixgbe_aci_opc_nvm_write_activate); + + cmd->cmd_flags = (u8)(cmd_flags & 0xFF); + cmd->offset_high = (u8)FIELD_GET(IXGBE_ACI_NVM_OFFSET_HI_A_MASK, + cmd_flags); + + err = ixgbe_aci_send_cmd(hw, &desc, NULL, 0); + if (!err && response_flags) + *response_flags = cmd->cmd_flags; + + return err; +} + +/** * ixgbe_nvm_validate_checksum - validate checksum * @hw: pointer to the HW struct * @@ -2267,6 +2527,955 @@ int ixgbe_nvm_validate_checksum(struct ixgbe_hw *hw) } /** + * ixgbe_discover_flash_size - Discover the available flash size + * @hw: pointer to the HW struct + * + * The device flash could be up to 16MB in size. However, it is possible that + * the actual size is smaller. Use bisection to determine the accessible size + * of flash memory. + * + * Return: the exit code of the operation. + */ +static int ixgbe_discover_flash_size(struct ixgbe_hw *hw) +{ + u32 min_size = 0, max_size = IXGBE_ACI_NVM_MAX_OFFSET + 1; + int err; + + err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ); + if (err) + return err; + + while ((max_size - min_size) > 1) { + u32 offset = (max_size + min_size) / 2; + u32 len = 1; + u8 data; + + err = ixgbe_read_flat_nvm(hw, offset, &len, &data, false); + if (err == -EIO && + hw->aci.last_status == IXGBE_ACI_RC_EINVAL) { + err = 0; + max_size = offset; + } else if (!err) { + min_size = offset; + } else { + /* an unexpected error occurred */ + goto err_read_flat_nvm; + } + } + + hw->flash.flash_size = max_size; + +err_read_flat_nvm: + ixgbe_release_nvm(hw); + + return err; +} + +/** + * ixgbe_read_sr_base_address - Read the value of a Shadow RAM pointer word + * @hw: pointer to the HW structure + * @offset: the word offset of the Shadow RAM word to read + * @pointer: pointer value read from Shadow RAM + * + * Read the given Shadow RAM word, and convert it to a pointer value specified + * in bytes. This function assumes the specified offset is a valid pointer + * word. + * + * Each pointer word specifies whether it is stored in word size or 4KB + * sector size by using the highest bit. 
The reported pointer value will be in + * bytes, intended for flat NVM reads. + * + * Return: the exit code of the operation. + */ +static int ixgbe_read_sr_base_address(struct ixgbe_hw *hw, u16 offset, + u32 *pointer) +{ + u16 value; + int err; + + err = ixgbe_read_ee_aci_e610(hw, offset, &value); + if (err) + return err; + + /* Determine if the pointer is in 4KB or word units */ + if (value & IXGBE_SR_NVM_PTR_4KB_UNITS) + *pointer = (value & ~IXGBE_SR_NVM_PTR_4KB_UNITS) * SZ_4K; + else + *pointer = value * sizeof(u16); + + return 0; +} + +/** + * ixgbe_read_sr_area_size - Read an area size from a Shadow RAM word + * @hw: pointer to the HW structure + * @offset: the word offset of the Shadow RAM to read + * @size: size value read from the Shadow RAM + * + * Read the given Shadow RAM word, and convert it to an area size value + * specified in bytes. This function assumes the specified offset is a valid + * area size word. + * + * Each area size word is specified in 4KB sector units. This function reports + * the size in bytes, intended for flat NVM reads. + * + * Return: the exit code of the operation. + */ +static int ixgbe_read_sr_area_size(struct ixgbe_hw *hw, u16 offset, u32 *size) +{ + u16 value; + int err; + + err = ixgbe_read_ee_aci_e610(hw, offset, &value); + if (err) + return err; + + /* Area sizes are always specified in 4KB units */ + *size = value * SZ_4K; + + return 0; +} + +/** + * ixgbe_determine_active_flash_banks - Discover active bank for each module + * @hw: pointer to the HW struct + * + * Read the Shadow RAM control word and determine which banks are active for + * the NVM, OROM, and Netlist modules. Also read and calculate the associated + * pointer and size. These values are then cached into the ixgbe_flash_info + * structure for later use in order to calculate the correct offset to read + * from the active module. + * + * Return: the exit code of the operation. 
+ */ +static int ixgbe_determine_active_flash_banks(struct ixgbe_hw *hw) +{ + struct ixgbe_bank_info *banks = &hw->flash.banks; + u16 ctrl_word; + int err; + + err = ixgbe_read_ee_aci_e610(hw, IXGBE_E610_SR_NVM_CTRL_WORD, + &ctrl_word); + if (err) + return err; + + if (FIELD_GET(IXGBE_SR_CTRL_WORD_1_M, ctrl_word) != + IXGBE_SR_CTRL_WORD_VALID) + return -ENODATA; + + if (!(ctrl_word & IXGBE_SR_CTRL_WORD_NVM_BANK)) + banks->nvm_bank = IXGBE_1ST_FLASH_BANK; + else + banks->nvm_bank = IXGBE_2ND_FLASH_BANK; + + if (!(ctrl_word & IXGBE_SR_CTRL_WORD_OROM_BANK)) + banks->orom_bank = IXGBE_1ST_FLASH_BANK; + else + banks->orom_bank = IXGBE_2ND_FLASH_BANK; + + if (!(ctrl_word & IXGBE_SR_CTRL_WORD_NETLIST_BANK)) + banks->netlist_bank = IXGBE_1ST_FLASH_BANK; + else + banks->netlist_bank = IXGBE_2ND_FLASH_BANK; + + err = ixgbe_read_sr_base_address(hw, IXGBE_E610_SR_1ST_NVM_BANK_PTR, + &banks->nvm_ptr); + if (err) + return err; + + err = ixgbe_read_sr_area_size(hw, IXGBE_E610_SR_NVM_BANK_SIZE, + &banks->nvm_size); + if (err) + return err; + + err = ixgbe_read_sr_base_address(hw, IXGBE_E610_SR_1ST_OROM_BANK_PTR, + &banks->orom_ptr); + if (err) + return err; + + err = ixgbe_read_sr_area_size(hw, IXGBE_E610_SR_OROM_BANK_SIZE, + &banks->orom_size); + if (err) + return err; + + err = ixgbe_read_sr_base_address(hw, IXGBE_E610_SR_NETLIST_BANK_PTR, + &banks->netlist_ptr); + if (err) + return err; + + err = ixgbe_read_sr_area_size(hw, IXGBE_E610_SR_NETLIST_BANK_SIZE, + &banks->netlist_size); + + return err; +} + +/** + * ixgbe_get_flash_bank_offset - Get offset into requested flash bank + * @hw: pointer to the HW structure + * @bank: whether to read from the active or inactive flash bank + * @module: the module to read from + * + * Based on the module, lookup the module offset from the beginning of the + * flash. + * + * Return: the flash offset. Note that a value of zero is invalid and must be + * treated as an error. + */ +static int ixgbe_get_flash_bank_offset(struct ixgbe_hw *hw, + enum ixgbe_bank_select bank, + u16 module) +{ + struct ixgbe_bank_info *banks = &hw->flash.banks; + enum ixgbe_flash_bank active_bank; + bool second_bank_active; + u32 offset, size; + + switch (module) { + case IXGBE_E610_SR_1ST_NVM_BANK_PTR: + offset = banks->nvm_ptr; + size = banks->nvm_size; + active_bank = banks->nvm_bank; + break; + case IXGBE_E610_SR_1ST_OROM_BANK_PTR: + offset = banks->orom_ptr; + size = banks->orom_size; + active_bank = banks->orom_bank; + break; + case IXGBE_E610_SR_NETLIST_BANK_PTR: + offset = banks->netlist_ptr; + size = banks->netlist_size; + active_bank = banks->netlist_bank; + break; + default: + return 0; + } + + switch (active_bank) { + case IXGBE_1ST_FLASH_BANK: + second_bank_active = false; + break; + case IXGBE_2ND_FLASH_BANK: + second_bank_active = true; + break; + default: + return 0; + } + + /* The second flash bank is stored immediately following the first + * bank. Based on whether the 1st or 2nd bank is active, and whether + * we want the active or inactive bank, calculate the desired offset. + */ + switch (bank) { + case IXGBE_ACTIVE_FLASH_BANK: + return offset + (second_bank_active ? size : 0); + case IXGBE_INACTIVE_FLASH_BANK: + return offset + (second_bank_active ? 
0 : size); + } + + return 0; +} + +/** + * ixgbe_read_flash_module - Read a word from one of the main NVM modules + * @hw: pointer to the HW structure + * @bank: which bank of the module to read + * @module: the module to read + * @offset: the offset into the module in bytes + * @data: storage for the word read from the flash + * @length: bytes of data to read + * + * Read data from the specified flash module. The bank parameter indicates + * whether or not to read from the active bank or the inactive bank of that + * module. + * + * The word will be read using flat NVM access, and relies on the + * hw->flash.banks data being setup by ixgbe_determine_active_flash_banks() + * during initialization. + * + * Return: the exit code of the operation. + */ +static int ixgbe_read_flash_module(struct ixgbe_hw *hw, + enum ixgbe_bank_select bank, + u16 module, u32 offset, u8 *data, u32 length) +{ + u32 start; + int err; + + start = ixgbe_get_flash_bank_offset(hw, bank, module); + if (!start) + return -EINVAL; + + err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ); + if (err) + return err; + + err = ixgbe_read_flat_nvm(hw, start + offset, &length, data, false); + + ixgbe_release_nvm(hw); + + return err; +} + +/** + * ixgbe_read_nvm_module - Read from the active main NVM module + * @hw: pointer to the HW structure + * @bank: whether to read from active or inactive NVM module + * @offset: offset into the NVM module to read, in words + * @data: storage for returned word value + * + * Read the specified word from the active NVM module. This includes the CSS + * header at the start of the NVM module. + * + * Return: the exit code of the operation. + */ +static int ixgbe_read_nvm_module(struct ixgbe_hw *hw, + enum ixgbe_bank_select bank, + u32 offset, u16 *data) +{ + __le16 data_local; + int err; + + err = ixgbe_read_flash_module(hw, bank, IXGBE_E610_SR_1ST_NVM_BANK_PTR, + offset * sizeof(data_local), + (u8 *)&data_local, + sizeof(data_local)); + if (!err) + *data = le16_to_cpu(data_local); + + return err; +} + +/** + * ixgbe_read_netlist_module - Read data from the netlist module area + * @hw: pointer to the HW structure + * @bank: whether to read from the active or inactive module + * @offset: offset into the netlist to read from + * @data: storage for returned word value + * + * Read a word from the specified netlist bank. + * + * Return: the exit code of the operation. + */ +static int ixgbe_read_netlist_module(struct ixgbe_hw *hw, + enum ixgbe_bank_select bank, + u32 offset, u16 *data) +{ + __le16 data_local; + int err; + + err = ixgbe_read_flash_module(hw, bank, IXGBE_E610_SR_NETLIST_BANK_PTR, + offset * sizeof(data_local), + (u8 *)&data_local, sizeof(data_local)); + if (!err) + *data = le16_to_cpu(data_local); + + return err; +} + +/** + * ixgbe_read_orom_module - Read from the active Option ROM module + * @hw: pointer to the HW structure + * @bank: whether to read from active or inactive OROM module + * @offset: offset into the OROM module to read, in words + * @data: storage for returned word value + * + * Read the specified word from the active Option ROM module of the flash. + * Note that unlike the NVM module, the CSS data is stored at the end of the + * module instead of at the beginning. + * + * Return: the exit code of the operation. 
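To make the bank selection in ixgbe_get_flash_bank_offset() above concrete, here is a worked example with invented values:

	/* banks->nvm_ptr = 0x10000, banks->nvm_size = 0x80000,
	 * banks->nvm_bank = IXGBE_2ND_FLASH_BANK (second bank active):
	 *
	 *   IXGBE_ACTIVE_FLASH_BANK   -> 0x10000 + 0x80000 = 0x90000
	 *   IXGBE_INACTIVE_FLASH_BANK -> 0x10000 + 0x0     = 0x10000
	 */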
+ */ +static int ixgbe_read_orom_module(struct ixgbe_hw *hw, + enum ixgbe_bank_select bank, + u32 offset, u16 *data) +{ + __le16 data_local; + int err; + + err = ixgbe_read_flash_module(hw, bank, IXGBE_E610_SR_1ST_OROM_BANK_PTR, + offset * sizeof(data_local), + (u8 *)&data_local, sizeof(data_local)); + if (!err) + *data = le16_to_cpu(data_local); + + return err; +} + +/** + * ixgbe_get_nvm_css_hdr_len - Read the CSS header length + * @hw: pointer to the HW struct + * @bank: whether to read from the active or inactive flash bank + * @hdr_len: storage for header length in words + * + * Read the CSS header length from the NVM CSS header and add the + * Authentication header size, and then convert to words. + * + * Return: the exit code of the operation. + */ +static int ixgbe_get_nvm_css_hdr_len(struct ixgbe_hw *hw, + enum ixgbe_bank_select bank, + u32 *hdr_len) +{ + u16 hdr_len_l, hdr_len_h; + u32 hdr_len_dword; + int err; + + err = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_HDR_LEN_L, + &hdr_len_l); + if (err) + return err; + + err = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_HDR_LEN_H, + &hdr_len_h); + if (err) + return err; + + /* CSS header length is in DWORD, so convert to words and add + * authentication header size. + */ + hdr_len_dword = (hdr_len_h << 16) | hdr_len_l; + *hdr_len = hdr_len_dword * 2 + IXGBE_NVM_AUTH_HEADER_LEN; + + return 0; +} + +/** + * ixgbe_read_nvm_sr_copy - Read a word from the Shadow RAM copy + * @hw: pointer to the HW structure + * @bank: whether to read from the active or inactive NVM module + * @offset: offset into the Shadow RAM copy to read, in words + * @data: storage for returned word value + * + * Read the specified word from the copy of the Shadow RAM found in the + * specified NVM module. + * + * Return: the exit code of the operation. + */ +static int ixgbe_read_nvm_sr_copy(struct ixgbe_hw *hw, + enum ixgbe_bank_select bank, + u32 offset, u16 *data) +{ + u32 hdr_len; + int err; + + err = ixgbe_get_nvm_css_hdr_len(hw, bank, &hdr_len); + if (err) + return err; + + hdr_len = round_up(hdr_len, IXGBE_HDR_LEN_ROUNDUP); + + return ixgbe_read_nvm_module(hw, bank, hdr_len + offset, data); +} + +/** + * ixgbe_get_nvm_srev - Read the security revision from the NVM CSS header + * @hw: pointer to the HW struct + * @bank: whether to read from the active or inactive flash bank + * @srev: storage for security revision + * + * Read the security revision out of the CSS header of the active NVM module + * bank. + * + * Return: the exit code of the operation. + */ +static int ixgbe_get_nvm_srev(struct ixgbe_hw *hw, + enum ixgbe_bank_select bank, u32 *srev) +{ + u16 srev_l, srev_h; + int err; + + err = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_SREV_L, &srev_l); + if (err) + return err; + + err = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_SREV_H, &srev_h); + if (err) + return err; + + *srev = (srev_h << 16) | srev_l; + + return 0; +} + +/** + * ixgbe_get_orom_civd_data - Get the combo version information from Option ROM + * @hw: pointer to the HW struct + * @bank: whether to read from the active or inactive flash module + * @civd: storage for the Option ROM CIVD data. + * + * Searches through the Option ROM flash contents to locate the CIVD data for + * the image. + * + * Return: the exit code of the operation. 
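A minimal sketch of the modulo-256 checksum rule used by the CIVD scan described above; the helper name is illustrative and only <linux/types.h> types are assumed:

static bool ixgbe_civd_sum_ok(const u8 *buf, size_t len)
{
	u8 sum = 0;
	size_t i;

	/* A valid CIVD section sums to zero modulo 256 over all of its bytes. */
	for (i = 0; i < len; i++)
		sum += buf[i];

	return sum == 0;
}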
+ */ +static int +ixgbe_get_orom_civd_data(struct ixgbe_hw *hw, enum ixgbe_bank_select bank, + struct ixgbe_orom_civd_info *civd) +{ + struct ixgbe_orom_civd_info tmp; + u32 offset; + int err; + + /* The CIVD section is located in the Option ROM aligned to 512 bytes. + * The first 4 bytes must contain the ASCII characters "$CIV". + * A simple modulo 256 sum of all of the bytes of the structure must + * equal 0. + */ + for (offset = 0; (offset + SZ_512) <= hw->flash.banks.orom_size; + offset += SZ_512) { + u8 sum = 0; + u32 i; + + err = ixgbe_read_flash_module(hw, bank, + IXGBE_E610_SR_1ST_OROM_BANK_PTR, + offset, + (u8 *)&tmp, sizeof(tmp)); + if (err) + return err; + + /* Skip forward until we find a matching signature */ + if (memcmp(IXGBE_OROM_CIV_SIGNATURE, tmp.signature, + sizeof(tmp.signature))) + continue; + + /* Verify that the simple checksum is zero */ + for (i = 0; i < sizeof(tmp); i++) + sum += ((u8 *)&tmp)[i]; + + if (sum) + return -EDOM; + + *civd = tmp; + return 0; + } + + return -ENODATA; +} + +/** + * ixgbe_get_orom_srev - Read the security revision from the OROM CSS header + * @hw: pointer to the HW struct + * @bank: whether to read from active or inactive flash module + * @srev: storage for security revision + * + * Read the security revision out of the CSS header of the active OROM module + * bank. + * + * Return: the exit code of the operation. + */ +static int ixgbe_get_orom_srev(struct ixgbe_hw *hw, + enum ixgbe_bank_select bank, + u32 *srev) +{ + u32 orom_size_word = hw->flash.banks.orom_size / 2; + u32 css_start, hdr_len; + u16 srev_l, srev_h; + int err; + + err = ixgbe_get_nvm_css_hdr_len(hw, bank, &hdr_len); + if (err) + return err; + + if (orom_size_word < hdr_len) + return -EINVAL; + + /* Calculate how far into the Option ROM the CSS header starts. Note + * that ixgbe_read_orom_module takes a word offset. + */ + css_start = orom_size_word - hdr_len; + err = ixgbe_read_orom_module(hw, bank, + css_start + IXGBE_NVM_CSS_SREV_L, + &srev_l); + if (err) + return err; + + err = ixgbe_read_orom_module(hw, bank, + css_start + IXGBE_NVM_CSS_SREV_H, + &srev_h); + if (err) + return err; + + *srev = srev_h << 16 | srev_l; + + return 0; +} + +/** + * ixgbe_get_orom_ver_info - Read Option ROM version information + * @hw: pointer to the HW struct + * @bank: whether to read from the active or inactive flash module + * @orom: pointer to Option ROM info structure + * + * Read Option ROM version and security revision from the Option ROM flash + * section. + * + * Return: the exit code of the operation. + */ +static int ixgbe_get_orom_ver_info(struct ixgbe_hw *hw, + enum ixgbe_bank_select bank, + struct ixgbe_orom_info *orom) +{ + struct ixgbe_orom_civd_info civd; + u32 combo_ver; + int err; + + err = ixgbe_get_orom_civd_data(hw, bank, &civd); + if (err) + return err; + + combo_ver = le32_to_cpu(civd.combo_ver); + + orom->major = (u8)FIELD_GET(IXGBE_OROM_VER_MASK, combo_ver); + orom->patch = (u8)FIELD_GET(IXGBE_OROM_VER_PATCH_MASK, combo_ver); + orom->build = (u16)FIELD_GET(IXGBE_OROM_VER_BUILD_MASK, combo_ver); + + return ixgbe_get_orom_srev(hw, bank, &orom->srev); +} + +/** + * ixgbe_get_inactive_orom_ver - Read Option ROM version from the inactive bank + * @hw: pointer to the HW structure + * @orom: storage for Option ROM version information + * + * Read the Option ROM version and security revision data for the inactive + * section of flash. Used to access version data for a pending update that has + * not yet been activated. + * + * Return: the exit code of the operation. 
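A hypothetical caller sketch for the helper documented above; the wrapper name and the major.build.patch formatting are illustrative, and only the ixgbe_orom_info fields populated by this patch are assumed:

static int example_pending_orom_ver(struct ixgbe_hw *hw, char *buf, size_t len)
{
	struct ixgbe_orom_info orom;
	int err;

	err = ixgbe_get_inactive_orom_ver(hw, &orom);
	if (err)
		return err;

	/* Present the pending (inactive bank) Option ROM version. */
	snprintf(buf, len, "%u.%u.%u", orom.major, orom.build, orom.patch);

	return 0;
}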
+ */ +int ixgbe_get_inactive_orom_ver(struct ixgbe_hw *hw, + struct ixgbe_orom_info *orom) +{ + return ixgbe_get_orom_ver_info(hw, IXGBE_INACTIVE_FLASH_BANK, orom); +} + +/** + * ixgbe_get_nvm_ver_info - Read NVM version information + * @hw: pointer to the HW struct + * @bank: whether to read from the active or inactive flash bank + * @nvm: pointer to NVM info structure + * + * Read the NVM EETRACK ID and map version of the main NVM image bank, filling + * in the nvm info structure. + * + * Return: the exit code of the operation. + */ +static int ixgbe_get_nvm_ver_info(struct ixgbe_hw *hw, + enum ixgbe_bank_select bank, + struct ixgbe_nvm_info *nvm) +{ + u16 eetrack_lo, eetrack_hi, ver; + int err; + + err = ixgbe_read_nvm_sr_copy(hw, bank, + IXGBE_E610_SR_NVM_DEV_STARTER_VER, &ver); + if (err) + return err; + + nvm->major = FIELD_GET(IXGBE_E610_NVM_VER_HI_MASK, ver); + nvm->minor = FIELD_GET(IXGBE_E610_NVM_VER_LO_MASK, ver); + + err = ixgbe_read_nvm_sr_copy(hw, bank, IXGBE_E610_SR_NVM_EETRACK_LO, + &eetrack_lo); + if (err) + return err; + + err = ixgbe_read_nvm_sr_copy(hw, bank, IXGBE_E610_SR_NVM_EETRACK_HI, + &eetrack_hi); + if (err) + return err; + + nvm->eetrack = (eetrack_hi << 16) | eetrack_lo; + + ixgbe_get_nvm_srev(hw, bank, &nvm->srev); + + return 0; +} + +/** + * ixgbe_get_inactive_nvm_ver - Read Option ROM version from the inactive bank + * @hw: pointer to the HW structure + * @nvm: storage for Option ROM version information + * + * Read the NVM EETRACK ID, Map version, and security revision of the + * inactive NVM bank. Used to access version data for a pending update that + * has not yet been activated. + * + * Return: the exit code of the operation. + */ +int ixgbe_get_inactive_nvm_ver(struct ixgbe_hw *hw, struct ixgbe_nvm_info *nvm) +{ + return ixgbe_get_nvm_ver_info(hw, IXGBE_INACTIVE_FLASH_BANK, nvm); +} + +/** + * ixgbe_get_active_nvm_ver - Read Option ROM version from the active bank + * @hw: pointer to the HW structure + * @nvm: storage for Option ROM version information + * + * Reads the NVM EETRACK ID, Map version, and security revision of the + * active NVM bank. + * + * Return: the exit code of the operation. + */ +static int ixgbe_get_active_nvm_ver(struct ixgbe_hw *hw, + struct ixgbe_nvm_info *nvm) +{ + return ixgbe_get_nvm_ver_info(hw, IXGBE_ACTIVE_FLASH_BANK, nvm); +} + +/** + * ixgbe_get_netlist_info - Read the netlist version information + * @hw: pointer to the HW struct + * @bank: whether to read from the active or inactive flash bank + * @netlist: pointer to netlist version info structure + * + * Get the netlist version information from the requested bank. Reads the Link + * Topology section to find the Netlist ID block and extract the relevant + * information into the netlist version structure. + * + * Return: the exit code of the operation. + */ +static int ixgbe_get_netlist_info(struct ixgbe_hw *hw, + enum ixgbe_bank_select bank, + struct ixgbe_netlist_info *netlist) +{ + u16 module_id, length, node_count, i; + u16 *id_blk; + int err; + + err = ixgbe_read_netlist_module(hw, bank, IXGBE_NETLIST_TYPE_OFFSET, + &module_id); + if (err) + return err; + + if (module_id != IXGBE_NETLIST_LINK_TOPO_MOD_ID) + return -EIO; + + err = ixgbe_read_netlist_module(hw, bank, IXGBE_LINK_TOPO_MODULE_LEN, + &length); + if (err) + return err; + + /* Sanity check that we have at least enough words to store the + * netlist ID block. 
+ */ + if (length < IXGBE_NETLIST_ID_BLK_SIZE) + return -EIO; + + err = ixgbe_read_netlist_module(hw, bank, IXGBE_LINK_TOPO_NODE_COUNT, + &node_count); + if (err) + return err; + + node_count &= IXGBE_LINK_TOPO_NODE_COUNT_M; + + id_blk = kcalloc(IXGBE_NETLIST_ID_BLK_SIZE, sizeof(*id_blk), GFP_KERNEL); + if (!id_blk) + return -ENOMEM; + + /* Read out the entire Netlist ID Block at once. */ + err = ixgbe_read_flash_module(hw, bank, IXGBE_E610_SR_NETLIST_BANK_PTR, + IXGBE_NETLIST_ID_BLK_OFFSET(node_count) * + sizeof(*id_blk), (u8 *)id_blk, + IXGBE_NETLIST_ID_BLK_SIZE * + sizeof(*id_blk)); + if (err) + goto free_id_blk; + + for (i = 0; i < IXGBE_NETLIST_ID_BLK_SIZE; i++) + id_blk[i] = le16_to_cpu(((__le16 *)id_blk)[i]); + + netlist->major = id_blk[IXGBE_NETLIST_ID_BLK_MAJOR_VER_HIGH] << 16 | + id_blk[IXGBE_NETLIST_ID_BLK_MAJOR_VER_LOW]; + netlist->minor = id_blk[IXGBE_NETLIST_ID_BLK_MINOR_VER_HIGH] << 16 | + id_blk[IXGBE_NETLIST_ID_BLK_MINOR_VER_LOW]; + netlist->type = id_blk[IXGBE_NETLIST_ID_BLK_TYPE_HIGH] << 16 | + id_blk[IXGBE_NETLIST_ID_BLK_TYPE_LOW]; + netlist->rev = id_blk[IXGBE_NETLIST_ID_BLK_REV_HIGH] << 16 | + id_blk[IXGBE_NETLIST_ID_BLK_REV_LOW]; + netlist->cust_ver = id_blk[IXGBE_NETLIST_ID_BLK_CUST_VER]; + /* Read the left most 4 bytes of SHA */ + netlist->hash = id_blk[IXGBE_NETLIST_ID_BLK_SHA_HASH_WORD(15)] << 16 | + id_blk[IXGBE_NETLIST_ID_BLK_SHA_HASH_WORD(14)]; + +free_id_blk: + kfree(id_blk); + return err; +} + +/** + * ixgbe_get_inactive_netlist_ver - Read netlist version from the inactive bank + * @hw: pointer to the HW struct + * @netlist: pointer to netlist version info structure + * + * Read the netlist version data from the inactive netlist bank. Used to + * extract version data of a pending flash update in order to display the + * version data. + * + * Return: the exit code of the operation. + */ +int ixgbe_get_inactive_netlist_ver(struct ixgbe_hw *hw, + struct ixgbe_netlist_info *netlist) +{ + return ixgbe_get_netlist_info(hw, IXGBE_INACTIVE_FLASH_BANK, netlist); +} + +/** + * ixgbe_get_flash_data - get flash data + * @hw: pointer to the HW struct + * + * Read and populate flash data such as Shadow RAM size, + * max_timeout and blank_nvm_mode + * + * Return: the exit code of the operation. + */ +int ixgbe_get_flash_data(struct ixgbe_hw *hw) +{ + struct ixgbe_flash_info *flash = &hw->flash; + u32 fla, gens_stat; + u8 sr_size; + int err; + + /* The SR size is stored regardless of the NVM programming mode + * as the blank mode may be used in the factory line. 
+ */ + gens_stat = IXGBE_READ_REG(hw, GLNVM_GENS); + sr_size = FIELD_GET(GLNVM_GENS_SR_SIZE_M, gens_stat); + + /* Switching to words (sr_size contains power of 2) */ + flash->sr_words = BIT(sr_size) * (SZ_1K / sizeof(u16)); + + /* Check if we are in the normal or blank NVM programming mode */ + fla = IXGBE_READ_REG(hw, IXGBE_GLNVM_FLA); + if (fla & IXGBE_GLNVM_FLA_LOCKED_M) { + flash->blank_nvm_mode = false; + } else { + flash->blank_nvm_mode = true; + return -EIO; + } + + err = ixgbe_discover_flash_size(hw); + if (err) + return err; + + err = ixgbe_determine_active_flash_banks(hw); + if (err) + return err; + + err = ixgbe_get_nvm_ver_info(hw, IXGBE_ACTIVE_FLASH_BANK, + &flash->nvm); + if (err) + return err; + + err = ixgbe_get_orom_ver_info(hw, IXGBE_ACTIVE_FLASH_BANK, + &flash->orom); + if (err) + return err; + + err = ixgbe_get_netlist_info(hw, IXGBE_ACTIVE_FLASH_BANK, + &flash->netlist); + return err; +} + +/** + * ixgbe_aci_nvm_update_empr - update NVM using EMPR + * @hw: pointer to the HW struct + * + * Force EMP reset using ACI command (0x0709). This command allows SW to + * request an EMPR to activate new FW. + * + * Return: the exit code of the operation. + */ +int ixgbe_aci_nvm_update_empr(struct ixgbe_hw *hw) +{ + struct ixgbe_aci_desc desc; + + ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_update_empr); + + return ixgbe_aci_send_cmd(hw, &desc, NULL, 0); +} + +/* ixgbe_nvm_set_pkg_data - NVM set package data + * @hw: pointer to the HW struct + * @del_pkg_data_flag: If is set then the current pkg_data store by FW + * is deleted. + * If bit is set to 1, then buffer should be size 0. + * @data: pointer to buffer + * @length: length of the buffer + * + * Set package data using ACI command (0x070A). + * This command is equivalent to the reception of + * a PLDM FW Update GetPackageData cmd. This command should be sent + * as part of the NVM update as the first cmd in the flow. + * + * Return: the exit code of the operation. + */ +int ixgbe_nvm_set_pkg_data(struct ixgbe_hw *hw, bool del_pkg_data_flag, + u8 *data, u16 length) +{ + struct ixgbe_aci_cmd_nvm_pkg_data *cmd; + struct ixgbe_aci_desc desc; + + if (length != 0 && !data) + return -EINVAL; + + cmd = &desc.params.pkg_data; + + ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_pkg_data); + desc.flags |= cpu_to_le16(IXGBE_ACI_FLAG_RD); + + if (del_pkg_data_flag) + cmd->cmd_flags |= IXGBE_ACI_NVM_PKG_DELETE; + + return ixgbe_aci_send_cmd(hw, &desc, data, length); +} + +/* ixgbe_nvm_pass_component_tbl - NVM pass component table + * @hw: pointer to the HW struct + * @data: pointer to buffer + * @length: length of the buffer + * @transfer_flag: parameter for determining stage of the update + * @comp_response: a pointer to the response from the 0x070B ACI. + * @comp_response_code: a pointer to the response code from the 0x070B ACI. + * + * Pass component table using ACI command (0x070B). This command is equivalent + * to the reception of a PLDM FW Update PassComponentTable cmd. + * This command should be sent once per component. It can be only sent after + * Set Package Data cmd and before actual update. FW will assume these + * commands are going to be sent until the TransferFlag is set to End or + * StartAndEnd. + * + * Return: the exit code of the operation. 
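A hedged sketch of the command ordering described above (Set Package Data before Pass Component Table); the wrapper name is hypothetical, the transfer flag is left to the caller, and error unwinding plus the per-component loop are elided:

static int example_nvm_update_order(struct ixgbe_hw *hw,
				    u8 *pkg, u16 pkg_len,
				    u8 *comp, u16 comp_len, u8 transfer_flag)
{
	u8 resp, resp_code;
	int err;

	/* 0x070A: equivalent of PLDM GetPackageData, first command in the flow */
	err = ixgbe_nvm_set_pkg_data(hw, false, pkg, pkg_len);
	if (err)
		return err;

	/* 0x070B: equivalent of PLDM PassComponentTable, once per component */
	return ixgbe_nvm_pass_component_tbl(hw, comp, comp_len, transfer_flag,
					    &resp, &resp_code);
}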
+ */ +int ixgbe_nvm_pass_component_tbl(struct ixgbe_hw *hw, u8 *data, u16 length, + u8 transfer_flag, u8 *comp_response, + u8 *comp_response_code) +{ + struct ixgbe_aci_cmd_nvm_pass_comp_tbl *cmd; + struct ixgbe_aci_desc desc; + int err; + + if (!data || !comp_response || !comp_response_code) + return -EINVAL; + + cmd = &desc.params.pass_comp_tbl; + + ixgbe_fill_dflt_direct_cmd_desc(&desc, + ixgbe_aci_opc_nvm_pass_component_tbl); + desc.flags |= cpu_to_le16(IXGBE_ACI_FLAG_RD); + + cmd->transfer_flag = transfer_flag; + err = ixgbe_aci_send_cmd(hw, &desc, data, length); + if (!err) { + *comp_response = cmd->component_response; + *comp_response_code = cmd->component_response_code; + } + + return err; +} + +/** * ixgbe_read_sr_word_aci - Reads Shadow RAM via ACI * @hw: pointer to the HW structure * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) @@ -2485,7 +3694,7 @@ int ixgbe_validate_eeprom_checksum_e610(struct ixgbe_hw *hw, u16 *checksum_val) if (err) return err; - err = ixgbe_read_sr_word_aci(hw, E610_SR_SW_CHECKSUM_WORD, + err = ixgbe_read_sr_word_aci(hw, IXGBE_E610_SR_SW_CHECKSUM_WORD, &tmp_checksum); ixgbe_release_nvm(hw); @@ -2580,9 +3789,129 @@ reset_hw_out: return err; } +/** + * ixgbe_get_pfa_module_tlv - Read sub module TLV from NVM PFA + * @hw: pointer to hardware structure + * @module_tlv: pointer to module TLV to return + * @module_tlv_len: pointer to module TLV length to return + * @module_type: module type requested + * + * Find the requested sub module TLV type from the Preserved Field + * Area (PFA) and returns the TLV pointer and length. The caller can + * use these to read the variable length TLV value. + * + * Return: the exit code of the operation. + */ +static int ixgbe_get_pfa_module_tlv(struct ixgbe_hw *hw, u16 *module_tlv, + u16 *module_tlv_len, u16 module_type) +{ + u16 pfa_len, pfa_ptr, pfa_end_ptr; + u16 next_tlv; + int err; + + err = ixgbe_read_ee_aci_e610(hw, IXGBE_E610_SR_PFA_PTR, &pfa_ptr); + if (err) + return err; + + err = ixgbe_read_ee_aci_e610(hw, pfa_ptr, &pfa_len); + if (err) + return err; + + /* Starting with first TLV after PFA length, iterate through the list + * of TLVs to find the requested one. + */ + next_tlv = pfa_ptr + 1; + pfa_end_ptr = pfa_ptr + pfa_len; + while (next_tlv < pfa_end_ptr) { + u16 tlv_sub_module_type, tlv_len; + + /* Read TLV type */ + err = ixgbe_read_ee_aci_e610(hw, next_tlv, + &tlv_sub_module_type); + if (err) + break; + + /* Read TLV length */ + err = ixgbe_read_ee_aci_e610(hw, next_tlv + 1, &tlv_len); + if (err) + break; + + if (tlv_sub_module_type == module_type) { + if (tlv_len) { + *module_tlv = next_tlv; + *module_tlv_len = tlv_len; + return 0; + } + return -EIO; + } + /* Check next TLV, i.e. current TLV pointer + length + 2 words + * (for current TLV's type and length). + */ + next_tlv = next_tlv + tlv_len + 2; + } + /* Module does not exist */ + return -ENODATA; +} + +/** + * ixgbe_read_pba_string_e610 - Read PBA string from NVM + * @hw: pointer to hardware structure + * @pba_num: stores the part number string from the NVM + * @pba_num_size: part number string buffer length + * + * Read the part number string from the NVM. + * + * Return: the exit code of the operation. 
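A worked sizing example (values invented) for the PBA read documented above:

	/* If the PBA size word reads 6, it counts itself plus 5 data words,
	 * i.e. 10 ASCII characters, so pba_num_size must be at least 11
	 * bytes to leave room for the terminating '\0'.
	 */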
+ */ +static int ixgbe_read_pba_string_e610(struct ixgbe_hw *hw, u8 *pba_num, + u32 pba_num_size) +{ + u16 pba_tlv, pba_tlv_len; + u16 pba_word, pba_size; + int err; + + *pba_num = '\0'; + + err = ixgbe_get_pfa_module_tlv(hw, &pba_tlv, &pba_tlv_len, + IXGBE_E610_SR_PBA_BLOCK_PTR); + if (err) + return err; + + /* pba_size is the next word */ + err = ixgbe_read_ee_aci_e610(hw, (pba_tlv + 2), &pba_size); + if (err) + return err; + + if (pba_tlv_len < pba_size) + return -EINVAL; + + /* Subtract one to get PBA word count (PBA Size word is included in + * total size). + */ + pba_size--; + + if (pba_num_size < (((u32)pba_size * 2) + 1)) + return -EINVAL; + + for (u16 i = 0; i < pba_size; i++) { + err = ixgbe_read_ee_aci_e610(hw, (pba_tlv + 2 + 1) + i, + &pba_word); + if (err) + return err; + + pba_num[(i * 2)] = FIELD_GET(IXGBE_E610_SR_PBA_BLOCK_MASK, + pba_word); + pba_num[(i * 2) + 1] = pba_word & 0xFF; + } + + pba_num[(pba_size * 2)] = '\0'; + + return err; +} + static const struct ixgbe_mac_operations mac_ops_e610 = { .init_hw = ixgbe_init_hw_generic, - .start_hw = ixgbe_start_hw_X540, + .start_hw = ixgbe_start_hw_e610, .clear_hw_cntrs = ixgbe_clear_hw_cntrs_generic, .enable_rx_dma = ixgbe_enable_rx_dma_generic, .get_mac_addr = ixgbe_get_mac_addr_generic, @@ -2621,8 +3950,12 @@ static const struct ixgbe_mac_operations mac_ops_e610 = { .led_off = ixgbe_led_off_generic, .init_led_link_act = ixgbe_init_led_link_act_generic, .reset_hw = ixgbe_reset_hw_e610, + .get_fw_ver = ixgbe_aci_get_fw_ver, .get_media_type = ixgbe_get_media_type_e610, .setup_link = ixgbe_setup_link_e610, + .fw_recovery_mode = ixgbe_fw_recovery_mode_e610, + .fw_rollback_mode = ixgbe_fw_rollback_mode_e610, + .get_nvm_ver = ixgbe_get_active_nvm_ver, .get_link_capabilities = ixgbe_get_link_capabilities_e610, .get_bus_info = ixgbe_get_bus_info_generic, .acquire_swfw_sync = ixgbe_acquire_swfw_sync_X540, @@ -2647,6 +3980,8 @@ static const struct ixgbe_eeprom_operations eeprom_ops_e610 = { .read = ixgbe_read_ee_aci_e610, .read_buffer = ixgbe_read_ee_aci_buffer_e610, .validate_checksum = ixgbe_validate_eeprom_checksum_e610, + .read_pba_string = ixgbe_read_pba_string_e610, + .init_params = ixgbe_init_eeprom_params_e610, }; const struct ixgbe_info ixgbe_e610_info = { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h index ba8c06b73810..bb31d65bd1c8 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h @@ -36,6 +36,7 @@ int ixgbe_aci_get_link_info(struct ixgbe_hw *hw, bool ena_lse, struct ixgbe_link_status *link); int ixgbe_aci_set_event_mask(struct ixgbe_hw *hw, u8 port_num, u16 mask); int ixgbe_configure_lse(struct ixgbe_hw *hw, bool activate, u16 mask); +int ixgbe_aci_set_port_id_led(struct ixgbe_hw *hw, bool orig_mode); enum ixgbe_media_type ixgbe_get_media_type_e610(struct ixgbe_hw *hw); int ixgbe_setup_link_e610(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait); @@ -67,6 +68,11 @@ int ixgbe_aci_read_nvm(struct ixgbe_hw *hw, u16 module_typeid, u32 offset, u16 length, void *data, bool last_command, bool read_shadow_ram); int ixgbe_nvm_validate_checksum(struct ixgbe_hw *hw); +int ixgbe_get_inactive_orom_ver(struct ixgbe_hw *hw, + struct ixgbe_orom_info *orom); +int ixgbe_get_inactive_nvm_ver(struct ixgbe_hw *hw, struct ixgbe_nvm_info *nvm); +int ixgbe_get_inactive_netlist_ver(struct ixgbe_hw *hw, + struct ixgbe_netlist_info *netlist); int ixgbe_read_sr_word_aci(struct ixgbe_hw *hw, u16 offset, u16 *data); int 
ixgbe_read_flat_nvm(struct ixgbe_hw *hw, u32 offset, u32 *length, u8 *data, bool read_shadow_ram); @@ -77,5 +83,18 @@ int ixgbe_read_ee_aci_buffer_e610(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data); int ixgbe_validate_eeprom_checksum_e610(struct ixgbe_hw *hw, u16 *checksum_val); int ixgbe_reset_hw_e610(struct ixgbe_hw *hw); +int ixgbe_get_flash_data(struct ixgbe_hw *hw); +int ixgbe_aci_nvm_update_empr(struct ixgbe_hw *hw); +int ixgbe_nvm_set_pkg_data(struct ixgbe_hw *hw, bool del_pkg_data_flag, + u8 *data, u16 length); +int ixgbe_nvm_pass_component_tbl(struct ixgbe_hw *hw, u8 *data, u16 length, + u8 transfer_flag, u8 *comp_response, + u8 *comp_response_code); +int ixgbe_aci_erase_nvm(struct ixgbe_hw *hw, u16 module_typeid); +int ixgbe_aci_update_nvm(struct ixgbe_hw *hw, u16 module_typeid, + u32 offset, u16 length, void *data, + bool last_command, u8 command_flags); +int ixgbe_nvm_write_activate(struct ixgbe_hw *hw, u16 cmd_flags, + u8 *response_flags); #endif /* _IXGBE_E610_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index f03925c1f521..d8a919ab7027 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -213,7 +213,7 @@ static void ixgbe_set_advertising_10gtypes(struct ixgbe_hw *hw, static int ixgbe_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); struct ixgbe_hw *hw = &adapter->hw; ixgbe_link_speed supported_link; bool autoneg = false; @@ -458,7 +458,7 @@ static int ixgbe_get_link_ksettings(struct net_device *netdev, static int ixgbe_set_link_ksettings(struct net_device *netdev, const struct ethtool_link_ksettings *cmd) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); struct ixgbe_hw *hw = &adapter->hw; u32 advertised, old; int err = 0; @@ -535,7 +535,7 @@ static int ixgbe_set_link_ksettings(struct net_device *netdev, static void ixgbe_get_pause_stats(struct net_device *netdev, struct ethtool_pause_stats *stats) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); struct ixgbe_hw_stats *hwstats = &adapter->stats; stats->tx_pause_frames = hwstats->lxontxc + hwstats->lxofftxc; @@ -545,7 +545,7 @@ static void ixgbe_get_pause_stats(struct net_device *netdev, static void ixgbe_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); struct ixgbe_hw *hw = &adapter->hw; if (ixgbe_device_supports_autoneg_fc(hw) && @@ -564,10 +564,26 @@ static void ixgbe_get_pauseparam(struct net_device *netdev, } } +static void ixgbe_set_pauseparam_finalize(struct net_device *netdev, + struct ixgbe_fc_info *fc) +{ + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); + struct ixgbe_hw *hw = &adapter->hw; + + /* If the thing changed then we'll update and use new autoneg. 
*/ + if (memcmp(fc, &hw->fc, sizeof(*fc))) { + hw->fc = *fc; + if (netif_running(netdev)) + ixgbe_reinit_locked(adapter); + else + ixgbe_reset(adapter); + } +} + static int ixgbe_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_fc_info fc = hw->fc; @@ -592,27 +608,52 @@ static int ixgbe_set_pauseparam(struct net_device *netdev, else fc.requested_mode = ixgbe_fc_none; - /* if the thing changed then we'll update and use new autoneg */ - if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) { - hw->fc = fc; - if (netif_running(netdev)) - ixgbe_reinit_locked(adapter); - else - ixgbe_reset(adapter); + ixgbe_set_pauseparam_finalize(netdev, &fc); + + return 0; +} + +static int ixgbe_set_pauseparam_e610(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); + struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_fc_info fc = hw->fc; + + if (!ixgbe_device_supports_autoneg_fc(hw)) + return -EOPNOTSUPP; + + if (pause->autoneg == AUTONEG_DISABLE) { + netdev_info(netdev, + "Cannot disable autonegotiation on this device.\n"); + return -EOPNOTSUPP; } + fc.disable_fc_autoneg = false; + + if (pause->rx_pause && pause->tx_pause) + fc.requested_mode = ixgbe_fc_full; + else if (pause->rx_pause) + fc.requested_mode = ixgbe_fc_rx_pause; + else if (pause->tx_pause) + fc.requested_mode = ixgbe_fc_tx_pause; + else + fc.requested_mode = ixgbe_fc_none; + + ixgbe_set_pauseparam_finalize(netdev, &fc); + return 0; } static u32 ixgbe_get_msglevel(struct net_device *netdev) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); return adapter->msg_enable; } static void ixgbe_set_msglevel(struct net_device *netdev, u32 data) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); adapter->msg_enable = data; } @@ -627,7 +668,7 @@ static int ixgbe_get_regs_len(struct net_device *netdev) static void ixgbe_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); struct ixgbe_hw *hw = &adapter->hw; u32 *regs_buff = p; u8 i; @@ -994,14 +1035,14 @@ static void ixgbe_get_regs(struct net_device *netdev, static int ixgbe_get_eeprom_len(struct net_device *netdev) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); return adapter->hw.eeprom.word_size * 2; } static int ixgbe_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, u8 *bytes) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); struct ixgbe_hw *hw = &adapter->hw; u16 *eeprom_buff; int first_word, last_word, eeprom_len; @@ -1037,7 +1078,7 @@ static int ixgbe_get_eeprom(struct net_device *netdev, static int ixgbe_set_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, u8 *bytes) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); struct ixgbe_hw *hw = &adapter->hw; u16 *eeprom_buff; void *ptr; @@ -1104,10 +1145,22 @@ err: return ret_val; } +void ixgbe_refresh_fw_version(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; 
+ + ixgbe_get_flash_data(hw); + ixgbe_set_fw_version_e610(adapter); +} + static void ixgbe_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); + + /* need to refresh info for e610 in case fw reloads in runtime */ + if (adapter->hw.mac.type == ixgbe_mac_e610) + ixgbe_refresh_fw_version(adapter); strscpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver)); @@ -1161,7 +1214,7 @@ static void ixgbe_get_ringparam(struct net_device *netdev, struct kernel_ethtool_ringparam *kernel_ring, struct netlink_ext_ack *extack) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); struct ixgbe_ring *tx_ring = adapter->tx_ring[0]; struct ixgbe_ring *rx_ring = adapter->rx_ring[0]; @@ -1176,7 +1229,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev, struct kernel_ethtool_ringparam *kernel_ring, struct netlink_ext_ack *extack) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); struct ixgbe_ring *temp_ring; int i, j, err = 0; u32 new_rx_count, new_tx_count; @@ -1336,7 +1389,7 @@ static int ixgbe_get_sset_count(struct net_device *netdev, int sset) static void ixgbe_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); struct rtnl_link_stats64 temp; const struct rtnl_link_stats64 *net_stats; unsigned int start; @@ -1710,7 +1763,7 @@ static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data) static irqreturn_t ixgbe_test_intr(int irq, void *data) { struct net_device *netdev = (struct net_device *) data; - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR); @@ -2183,7 +2236,7 @@ out: static void ixgbe_diag_test(struct net_device *netdev, struct ethtool_test *eth_test, u64 *data) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); bool if_running = netif_running(netdev); if (ixgbe_removed(adapter->hw.hw_addr)) { @@ -2306,7 +2359,7 @@ static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter, static void ixgbe_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC; @@ -2328,7 +2381,7 @@ static void ixgbe_get_wol(struct net_device *netdev, static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE | WAKE_FILTER)) @@ -2353,9 +2406,53 @@ static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) return 0; } +static int ixgbe_set_wol_acpi(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); + struct ixgbe_hw *hw = &adapter->hw; + u32 grc; + + if (ixgbe_wol_exclusion(adapter, wol)) + return wol->wolopts ? 
-EOPNOTSUPP : 0; + + /* disable APM wakeup */ + grc = IXGBE_READ_REG(hw, IXGBE_GRC_X550EM_a); + grc &= ~IXGBE_GRC_APME; + IXGBE_WRITE_REG(hw, IXGBE_GRC_X550EM_a, grc); + + /* erase existing filters */ + IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0); + adapter->wol = 0; + + if (wol->wolopts & WAKE_UCAST) + adapter->wol |= IXGBE_WUFC_EX; + if (wol->wolopts & WAKE_MCAST) + adapter->wol |= IXGBE_WUFC_MC; + if (wol->wolopts & WAKE_BCAST) + adapter->wol |= IXGBE_WUFC_BC; + + IXGBE_WRITE_REG(hw, IXGBE_WUC, IXGBE_WUC_PME_EN); + IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wol); + + hw->wol_enabled = adapter->wol; + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + return 0; +} + +static int ixgbe_set_wol_e610(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + if (wol->wolopts & (WAKE_UCAST | WAKE_MCAST | WAKE_BCAST)) + return ixgbe_set_wol_acpi(netdev, wol); + else + return ixgbe_set_wol(netdev, wol); +} + static int ixgbe_nway_reset(struct net_device *netdev) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); if (netif_running(netdev)) ixgbe_reinit_locked(adapter); @@ -2366,7 +2463,7 @@ static int ixgbe_nway_reset(struct net_device *netdev) static int ixgbe_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); struct ixgbe_hw *hw = &adapter->hw; if (!hw->mac.ops.led_on || !hw->mac.ops.led_off) @@ -2394,12 +2491,32 @@ static int ixgbe_set_phys_id(struct net_device *netdev, return 0; } +static int ixgbe_set_phys_id_e610(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); + bool led_active; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + led_active = true; + break; + case ETHTOOL_ID_INACTIVE: + led_active = false; + break; + default: + return -EOPNOTSUPP; + } + + return ixgbe_aci_set_port_id_led(&adapter->hw, !led_active); +} + static int ixgbe_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); /* only valid if in constant ITR mode */ if (adapter->rx_itr_setting <= 1) @@ -2455,7 +2572,7 @@ static int ixgbe_set_coalesce(struct net_device *netdev, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); struct ixgbe_q_vector *q_vector; int i; u16 tx_itr_param, rx_itr_param, tx_itr_prev; @@ -2681,7 +2798,7 @@ static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter) static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, u32 *rule_locs) { - struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev); int ret = -EOPNOTSUPP; switch (cmd->cmd) { @@ -3069,7 +3186,7 @@ static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter, static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) { - struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev); int ret = -EOPNOTSUPP; switch (cmd->cmd) { @@ -3096,7 +3213,7 @@ static u32 ixgbe_get_rxfh_key_size(struct net_device *netdev) static u32 ixgbe_rss_indir_size(struct net_device *netdev) 
{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); return ixgbe_rss_indir_tbl_entries(adapter); } @@ -3116,7 +3233,7 @@ static void ixgbe_get_reta(struct ixgbe_adapter *adapter, u32 *indir) static int ixgbe_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); rxfh->hfunc = ETH_RSS_HASH_TOP; @@ -3134,7 +3251,7 @@ static int ixgbe_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh, struct netlink_ext_ack *extack) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); int i; u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter); @@ -3176,7 +3293,7 @@ static int ixgbe_set_rxfh(struct net_device *netdev, static int ixgbe_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info) { - struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev); /* we always support timestamping disabled */ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE); @@ -3252,7 +3369,7 @@ static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter) static void ixgbe_get_channels(struct net_device *dev, struct ethtool_channels *ch) { - struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev); /* report maximum channels */ ch->max_combined = ixgbe_max_channels(adapter); @@ -3289,7 +3406,7 @@ static void ixgbe_get_channels(struct net_device *dev, static int ixgbe_set_channels(struct net_device *dev, struct ethtool_channels *ch) { - struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev); unsigned int count = ch->combined_count; u8 max_rss_indices = ixgbe_max_rss_indices(adapter); @@ -3327,7 +3444,7 @@ static int ixgbe_set_channels(struct net_device *dev, static int ixgbe_get_module_info(struct net_device *dev, struct ethtool_modinfo *modinfo) { - struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev); struct ixgbe_hw *hw = &adapter->hw; u8 sff8472_rev, addr_mode; bool page_swap = false; @@ -3373,7 +3490,7 @@ static int ixgbe_get_module_eeprom(struct net_device *dev, struct ethtool_eeprom *ee, u8 *data) { - struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev); struct ixgbe_hw *hw = &adapter->hw; int status = -EFAULT; u8 databyte = 0xFF; @@ -3469,7 +3586,7 @@ ixgbe_get_eee_fw(struct ixgbe_adapter *adapter, struct ethtool_keee *edata) static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_keee *edata) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); struct ixgbe_hw *hw = &adapter->hw; if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE)) @@ -3483,7 +3600,7 @@ static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_keee *edata) static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_keee *edata) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); struct ixgbe_hw *hw = &adapter->hw; struct ethtool_keee eee_data; int ret_val; @@ -3538,7 +3655,7 @@ static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_keee *edata) static u32 ixgbe_get_priv_flags(struct net_device *netdev) { - struct ixgbe_adapter *adapter = 
netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); u32 priv_flags = 0; if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY) @@ -3555,7 +3672,7 @@ static u32 ixgbe_get_priv_flags(struct net_device *netdev) static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); unsigned int flags2 = adapter->flags2; unsigned int i; @@ -3638,7 +3755,57 @@ static const struct ethtool_ops ixgbe_ethtool_ops = { .set_link_ksettings = ixgbe_set_link_ksettings, }; +static const struct ethtool_ops ixgbe_ethtool_ops_e610 = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS, + .get_drvinfo = ixgbe_get_drvinfo, + .get_regs_len = ixgbe_get_regs_len, + .get_regs = ixgbe_get_regs, + .get_wol = ixgbe_get_wol, + .set_wol = ixgbe_set_wol_e610, + .nway_reset = ixgbe_nway_reset, + .get_link = ethtool_op_get_link, + .get_eeprom_len = ixgbe_get_eeprom_len, + .get_eeprom = ixgbe_get_eeprom, + .set_eeprom = ixgbe_set_eeprom, + .get_ringparam = ixgbe_get_ringparam, + .set_ringparam = ixgbe_set_ringparam, + .get_pause_stats = ixgbe_get_pause_stats, + .get_pauseparam = ixgbe_get_pauseparam, + .set_pauseparam = ixgbe_set_pauseparam_e610, + .get_msglevel = ixgbe_get_msglevel, + .set_msglevel = ixgbe_set_msglevel, + .self_test = ixgbe_diag_test, + .get_strings = ixgbe_get_strings, + .set_phys_id = ixgbe_set_phys_id_e610, + .get_sset_count = ixgbe_get_sset_count, + .get_ethtool_stats = ixgbe_get_ethtool_stats, + .get_coalesce = ixgbe_get_coalesce, + .set_coalesce = ixgbe_set_coalesce, + .get_rxnfc = ixgbe_get_rxnfc, + .set_rxnfc = ixgbe_set_rxnfc, + .get_rxfh_indir_size = ixgbe_rss_indir_size, + .get_rxfh_key_size = ixgbe_get_rxfh_key_size, + .get_rxfh = ixgbe_get_rxfh, + .set_rxfh = ixgbe_set_rxfh, + .get_eee = ixgbe_get_eee, + .set_eee = ixgbe_set_eee, + .get_channels = ixgbe_get_channels, + .set_channels = ixgbe_set_channels, + .get_priv_flags = ixgbe_get_priv_flags, + .set_priv_flags = ixgbe_set_priv_flags, + .get_ts_info = ixgbe_get_ts_info, + .get_module_info = ixgbe_get_module_info, + .get_module_eeprom = ixgbe_get_module_eeprom, + .get_link_ksettings = ixgbe_get_link_ksettings, + .set_link_ksettings = ixgbe_set_link_ksettings, +}; + void ixgbe_set_ethtool_ops(struct net_device *netdev) { - netdev->ethtool_ops = &ixgbe_ethtool_ops; + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); + + if (adapter->hw.mac.type == ixgbe_mac_e610) + netdev->ethtool_ops = &ixgbe_ethtool_ops_e610; + else + netdev->ethtool_ops = &ixgbe_ethtool_ops; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c index 955dced844a9..7dcf6ecd157b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c @@ -56,7 +56,7 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid) if (xid >= netdev->fcoe_ddp_xid) return 0; - adapter = netdev_priv(netdev); + adapter = ixgbe_from_netdev(netdev); fcoe = &adapter->fcoe; ddp = &fcoe->ddp[xid]; if (!ddp->udl) @@ -153,7 +153,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, if (!netdev || !sgl) return 0; - adapter = netdev_priv(netdev); + adapter = ixgbe_from_netdev(netdev); if (xid >= netdev->fcoe_ddp_xid) { e_warn(drv, "xid=0x%x out-of-range\n", xid); return 0; @@ -834,7 +834,7 @@ static void ixgbe_fcoe_ddp_disable(struct ixgbe_adapter *adapter) */ int ixgbe_fcoe_enable(struct net_device *netdev) { - struct ixgbe_adapter 
*adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); struct ixgbe_fcoe *fcoe = &adapter->fcoe; atomic_inc(&fcoe->refcnt); @@ -881,7 +881,7 @@ int ixgbe_fcoe_enable(struct net_device *netdev) */ int ixgbe_fcoe_disable(struct net_device *netdev) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); if (!atomic_dec_and_test(&adapter->fcoe.refcnt)) return -EINVAL; @@ -927,7 +927,7 @@ int ixgbe_fcoe_disable(struct net_device *netdev) int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type) { u16 prefix = 0xffff; - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); struct ixgbe_mac_info *mac = &adapter->hw.mac; switch (type) { @@ -967,7 +967,7 @@ int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type) int ixgbe_fcoe_get_hbainfo(struct net_device *netdev, struct netdev_fcoe_hbainfo *info) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); struct ixgbe_hw *hw = &adapter->hw; u64 dsn; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.c new file mode 100644 index 000000000000..49d3b66add7e --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.c @@ -0,0 +1,707 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2025 Intel Corporation. */ + +#include <linux/crc32.h> +#include <linux/pldmfw.h> +#include <linux/uuid.h> + +#include "ixgbe.h" +#include "ixgbe_fw_update.h" + +struct ixgbe_fwu_priv { + struct pldmfw context; + + struct ixgbe_adapter *adapter; + struct netlink_ext_ack *extack; + + /* Track which NVM banks to activate at the end of the update */ + u8 activate_flags; + bool emp_reset_available; +}; + +/** + * ixgbe_send_package_data - Send record package data to firmware + * @context: PLDM fw update structure + * @data: pointer to the package data + * @length: length of the package data + * + * Send a copy of the package data associated with the PLDM record matching + * this device to the firmware. + * + * Note that this function sends an AdminQ command that will fail unless the + * NVM resource has been acquired. + * + * Return: zero on success, or a negative error code on failure. + */ +static int ixgbe_send_package_data(struct pldmfw *context, + const u8 *data, u16 length) +{ + struct ixgbe_fwu_priv *priv = container_of(context, + struct ixgbe_fwu_priv, + context); + struct ixgbe_adapter *adapter = priv->adapter; + struct ixgbe_hw *hw = &adapter->hw; + u8 *package_data; + int err; + + package_data = kmemdup(data, length, GFP_KERNEL); + if (!package_data) + return -ENOMEM; + + err = ixgbe_nvm_set_pkg_data(hw, false, package_data, length); + + kfree(package_data); + + return err; +} + +/** + * ixgbe_check_component_response - Report firmware response to a component + * @adapter: device private data structure + * @response: indicates whether this component can be updated + * @code: code indicating reason for response + * @extack: netlink extended ACK structure + * + * Check whether firmware indicates if this component can be updated. Report + * a suitable error message over the netlink extended ACK if the component + * cannot be updated. + * + * Return: 0 if the component can be updated, or -ECANCELED if the + * firmware indicates the component cannot be updated. 
+ */ +static int ixgbe_check_component_response(struct ixgbe_adapter *adapter, + u8 response, u8 code, + struct netlink_ext_ack *extack) +{ + struct ixgbe_hw *hw = &adapter->hw; + + switch (response) { + case IXGBE_ACI_NVM_PASS_COMP_CAN_BE_UPDATED: + /* Firmware indicated this update is good to proceed. */ + return 0; + case IXGBE_ACI_NVM_PASS_COMP_CAN_MAY_BE_UPDATEABLE: + NL_SET_ERR_MSG_MOD(extack, + "Firmware recommends not updating, as it may result in a downgrade. Continuing anyways"); + return 0; + case IXGBE_ACI_NVM_PASS_COMP_CAN_NOT_BE_UPDATED: + NL_SET_ERR_MSG_MOD(extack, "Firmware has rejected updating."); + break; + case IXGBE_ACI_NVM_PASS_COMP_PARTIAL_CHECK: + if (hw->mac.ops.fw_recovery_mode && + hw->mac.ops.fw_recovery_mode(hw)) + return 0; + break; + } + + switch (code) { + case IXGBE_ACI_NVM_PASS_COMP_STAMP_IDENTICAL_CODE: + NL_SET_ERR_MSG_MOD(extack, + "Component comparison stamp is identical to running image"); + break; + case IXGBE_ACI_NVM_PASS_COMP_STAMP_LOWER: + NL_SET_ERR_MSG_MOD(extack, + "Component comparison stamp is lower than running image"); + break; + case IXGBE_ACI_NVM_PASS_COMP_INVALID_STAMP_CODE: + NL_SET_ERR_MSG_MOD(extack, + "Component comparison stamp is invalid"); + break; + case IXGBE_ACI_NVM_PASS_COMP_CONFLICT_CODE: + NL_SET_ERR_MSG_MOD(extack, + "Component table conflict occurred"); + break; + case IXGBE_ACI_NVM_PASS_COMP_PRE_REQ_NOT_MET_CODE: + NL_SET_ERR_MSG_MOD(extack, "Component pre-requisites not met"); + break; + case IXGBE_ACI_NVM_PASS_COMP_NOT_SUPPORTED_CODE: + NL_SET_ERR_MSG_MOD(extack, "Component not supported"); + break; + case IXGBE_ACI_NVM_PASS_COMP_CANNOT_DOWNGRADE_CODE: + NL_SET_ERR_MSG_MOD(extack, "Component cannot be downgraded"); + break; + case IXGBE_ACI_NVM_PASS_COMP_INCOMPLETE_IMAGE_CODE: + NL_SET_ERR_MSG_MOD(extack, "Incomplete component image"); + break; + case IXGBE_ACI_NVM_PASS_COMP_VER_STR_IDENTICAL_CODE: + NL_SET_ERR_MSG_MOD(extack, + "Component version is identical to running image"); + break; + case IXGBE_ACI_NVM_PASS_COMP_VER_STR_LOWER_CODE: + NL_SET_ERR_MSG_MOD(extack, + "Component version is lower than the running image"); + break; + default: + NL_SET_ERR_MSG_MOD(extack, + "Received unexpected response code from firmware"); + break; + } + + return -ECANCELED; +} + +/** + * ixgbe_send_component_table - Send PLDM component table to firmware + * @context: PLDM fw update structure + * @component: the component to process + * @transfer_flag: relative transfer order of this component + * + * Read relevant data from the component and forward it to the device + * firmware. Check the response to determine if the firmware indicates that + * the update can proceed. + * + * This function sends ACI commands related to the NVM, and assumes that + * the NVM resource has been acquired. + * + * Return: 0 on success, or a negative error code on failure. 
+ */ +static int ixgbe_send_component_table(struct pldmfw *context, + struct pldmfw_component *component, + u8 transfer_flag) +{ + struct ixgbe_fwu_priv *priv = container_of(context, + struct ixgbe_fwu_priv, + context); + struct ixgbe_adapter *adapter = priv->adapter; + struct netlink_ext_ack *extack = priv->extack; + struct ixgbe_aci_cmd_nvm_comp_tbl *comp_tbl; + u8 comp_response, comp_response_code; + struct ixgbe_hw *hw = &adapter->hw; + size_t length; + int err; + + switch (component->identifier) { + case NVM_COMP_ID_OROM: + case NVM_COMP_ID_NVM: + case NVM_COMP_ID_NETLIST: + break; + default: + NL_SET_ERR_MSG_MOD(extack, + "Unable to update due to unknown firmware component"); + return -EOPNOTSUPP; + } + + length = struct_size(comp_tbl, cvs, component->version_len); + comp_tbl = kzalloc(length, GFP_KERNEL); + if (!comp_tbl) + return -ENOMEM; + + comp_tbl->comp_class = cpu_to_le16(component->classification); + comp_tbl->comp_id = cpu_to_le16(component->identifier); + comp_tbl->comp_class_idx = FWU_COMP_CLASS_IDX_NOT_USE; + comp_tbl->comp_cmp_stamp = cpu_to_le32(component->comparison_stamp); + comp_tbl->cvs_type = component->version_type; + comp_tbl->cvs_len = component->version_len; + + memcpy(comp_tbl->cvs, component->version_string, + component->version_len); + + err = ixgbe_nvm_pass_component_tbl(hw, (u8 *)comp_tbl, length, + transfer_flag, &comp_response, + &comp_response_code); + + kfree(comp_tbl); + + if (err) { + NL_SET_ERR_MSG_MOD(extack, + "Failed to transfer component table to firmware"); + return -EIO; + } + + return ixgbe_check_component_response(adapter, + comp_response, + comp_response_code, extack); +} + +/** + * ixgbe_write_one_nvm_block - Write an NVM block and await completion response + * @adapter: the PF data structure + * @module: the module to write to + * @offset: offset in bytes + * @block_size: size of the block to write, up to 4k + * @block: pointer to block of data to write + * @last_cmd: whether this is the last command + * @extack: netlink extended ACK structure + * + * Write a block of data to a flash module, and await for the completion + * response message from firmware. + * + * Note this function assumes the caller has acquired the NVM resource. + * + * On successful return, reset level indicates the device reset required to + * complete the update. + * + * 0 - IXGBE_ACI_NVM_POR_FLAG - A full power on is required + * 1 - IXGBE_ACI_NVM_PERST_FLAG - A cold PCIe reset is required + * 2 - IXGBE_ACI_NVM_EMPR_FLAG - An EMP reset is required + * + * Return: 0 on success, or a negative error code on failure. + */ +static int ixgbe_write_one_nvm_block(struct ixgbe_adapter *adapter, + u16 module, u32 offset, + u16 block_size, u8 *block, bool last_cmd, + struct netlink_ext_ack *extack) +{ + struct ixgbe_hw *hw = &adapter->hw; + + return ixgbe_aci_update_nvm(hw, module, offset, block_size, block, + last_cmd, 0); +} + +/** + * ixgbe_write_nvm_module - Write data to an NVM module + * @adapter: the PF driver structure + * @module: the module id to program + * @component: the name of the component being updated + * @image: buffer of image data to write to the NVM + * @length: length of the buffer + * @extack: netlink extended ACK structure + * + * Loop over the data for a given NVM module and program it in 4 Kb + * blocks. Notify devlink core of progress after each block is programmed. + * Loops over a block of data and programs the NVM in 4k block chunks. + * + * Note this function assumes the caller has acquired the NVM resource. 
+ * + * Return: 0 on success, or a negative error code on failure. + */ +static int ixgbe_write_nvm_module(struct ixgbe_adapter *adapter, u16 module, + const char *component, const u8 *image, + u32 length, + struct netlink_ext_ack *extack) +{ + struct devlink *devlink = adapter->devlink; + u32 offset = 0; + bool last_cmd; + u8 *block; + int err; + + devlink_flash_update_status_notify(devlink, "Flashing", + component, 0, length); + + block = kzalloc(IXGBE_ACI_MAX_BUFFER_SIZE, GFP_KERNEL); + if (!block) + return -ENOMEM; + + do { + u32 block_size; + + block_size = min_t(u32, IXGBE_ACI_MAX_BUFFER_SIZE, + length - offset); + last_cmd = !(offset + block_size < length); + + memcpy(block, image + offset, block_size); + + err = ixgbe_write_one_nvm_block(adapter, module, offset, + block_size, block, last_cmd, + extack); + if (err) + break; + + offset += block_size; + + devlink_flash_update_status_notify(devlink, "Flashing", + component, offset, length); + } while (!last_cmd); + + if (err) + devlink_flash_update_status_notify(devlink, "Flashing failed", + component, length, length); + else + devlink_flash_update_status_notify(devlink, "Flashing done", + component, length, length); + + kfree(block); + + return err; +} + +/* Length in seconds to wait before timing out when erasing a flash module. + * Yes, erasing really can take minutes to complete. + */ +#define IXGBE_FW_ERASE_TIMEOUT 300 + +/** + * ixgbe_erase_nvm_module - Erase an NVM module and await firmware completion + * @adapter: the PF data structure + * @module: the module to erase + * @component: name of the component being updated + * @extack: netlink extended ACK structure + * + * Erase the inactive NVM bank associated with this module, and await for + * a completion response message from firmware. + * + * Note this function assumes the caller has acquired the NVM resource. + * + * Return: 0 on success, or a negative error code on failure. + */ +static int ixgbe_erase_nvm_module(struct ixgbe_adapter *adapter, u16 module, + const char *component, + struct netlink_ext_ack *extack) +{ + struct devlink *devlink = adapter->devlink; + struct ixgbe_hw *hw = &adapter->hw; + int err; + + devlink_flash_update_timeout_notify(devlink, "Erasing", component, + IXGBE_FW_ERASE_TIMEOUT); + + err = ixgbe_aci_erase_nvm(hw, module); + if (err) + devlink_flash_update_status_notify(devlink, "Erasing failed", + component, 0, 0); + else + devlink_flash_update_status_notify(devlink, "Erasing done", + component, 0, 0); + + return err; +} + +/** + * ixgbe_switch_flash_banks - Tell firmware to switch NVM banks + * @adapter: Pointer to the PF data structure + * @activate_flags: flags used for the activation command + * @emp_reset_available: on return, indicates if EMP reset is available + * @extack: netlink extended ACK structure + * + * Notify firmware to activate the newly written flash banks, and wait for the + * firmware response. + * + * Return: 0 on success or an error code on failure. 
+ */ +static int ixgbe_switch_flash_banks(struct ixgbe_adapter *adapter, + u8 activate_flags, + bool *emp_reset_available, + struct netlink_ext_ack *extack) +{ + struct ixgbe_hw *hw = &adapter->hw; + u8 response_flags; + int err; + + err = ixgbe_nvm_write_activate(hw, activate_flags, &response_flags); + if (err) { + NL_SET_ERR_MSG_MOD(extack, + "Failed to switch active flash banks"); + return err; + } + + if (emp_reset_available) { + if (hw->dev_caps.common_cap.reset_restrict_support) + *emp_reset_available = + response_flags & IXGBE_ACI_NVM_EMPR_ENA; + else + *emp_reset_available = true; + } + + return 0; +} + +/** + * ixgbe_flash_component - Flash a component of the NVM + * @context: PLDM fw update structure + * @component: the component table to program + * + * Program the flash contents for a given component. First, determine the + * module id. Then, erase the secondary bank for this module. Finally, write + * the contents of the component to the NVM. + * + * Note this function assumes the caller has acquired the NVM resource. + * + * Return: 0 on success, or a negative error code on failure. + */ +static int ixgbe_flash_component(struct pldmfw *context, + struct pldmfw_component *component) +{ + struct ixgbe_fwu_priv *priv = container_of(context, + struct ixgbe_fwu_priv, + context); + struct netlink_ext_ack *extack = priv->extack; + struct ixgbe_adapter *adapter = priv->adapter; + const char *name; + u16 module; + int err; + u8 flag; + + switch (component->identifier) { + case NVM_COMP_ID_OROM: + module = IXGBE_E610_SR_1ST_OROM_BANK_PTR; + flag = IXGBE_ACI_NVM_ACTIV_SEL_OROM; + name = "fw.undi"; + break; + case NVM_COMP_ID_NVM: + module = IXGBE_E610_SR_1ST_NVM_BANK_PTR; + flag = IXGBE_ACI_NVM_ACTIV_SEL_NVM; + name = "fw.mgmt"; + break; + case NVM_COMP_ID_NETLIST: + module = IXGBE_E610_SR_NETLIST_BANK_PTR; + flag = IXGBE_ACI_NVM_ACTIV_SEL_NETLIST; + name = "fw.netlist"; + break; + + default: + return -EOPNOTSUPP; + } + + /* Mark this component for activating at the end. */ + priv->activate_flags |= flag; + + err = ixgbe_erase_nvm_module(adapter, module, name, extack); + if (err) + return err; + + return ixgbe_write_nvm_module(adapter, module, name, + component->component_data, + component->component_size, extack); +} + +/** + * ixgbe_finalize_update - Perform last steps to complete device update + * @context: PLDM fw update structure + * + * Called as the last step of the update process. Complete the update by + * telling the firmware to switch active banks, and perform a reset if + * configured. + * + * Return: 0 on success, or an error code on failure.
+ */ +static int ixgbe_finalize_update(struct pldmfw *context) +{ + struct ixgbe_fwu_priv *priv = container_of(context, + struct ixgbe_fwu_priv, + context); + struct ixgbe_adapter *adapter = priv->adapter; + struct netlink_ext_ack *extack = priv->extack; + struct devlink *devlink = adapter->devlink; + int err; + + /* Finally, notify firmware to activate the written NVM banks */ + err = ixgbe_switch_flash_banks(adapter, priv->activate_flags, + &priv->emp_reset_available, extack); + if (err) + return err; + + adapter->fw_emp_reset_disabled = !priv->emp_reset_available; + + if (!adapter->fw_emp_reset_disabled) + devlink_flash_update_status_notify(devlink, + "Suggested is to activate new firmware by devlink reload, if it doesn't work then a power cycle is required", + NULL, 0, 0); + + return 0; +} + +static const struct pldmfw_ops ixgbe_fwu_ops_e610 = { + .match_record = &pldmfw_op_pci_match_record, + .send_package_data = &ixgbe_send_package_data, + .send_component_table = &ixgbe_send_component_table, + .flash_component = &ixgbe_flash_component, + .finalize_update = &ixgbe_finalize_update, +}; + +/** + * ixgbe_get_pending_updates - Check if the component has a pending update + * @adapter: the PF driver structure + * @pending: on return, bitmap of updates pending + * @extack: Netlink extended ACK + * + * Check if the device has any pending updates on any flash components. + * + * Return: 0 on success, or a negative error code on failure. Update + * pending with the bitmap of pending updates. + */ +int ixgbe_get_pending_updates(struct ixgbe_adapter *adapter, u8 *pending, + struct netlink_ext_ack *extack) +{ + struct ixgbe_hw_dev_caps *dev_caps; + struct ixgbe_hw *hw = &adapter->hw; + int err; + + dev_caps = kzalloc(sizeof(*dev_caps), GFP_KERNEL); + if (!dev_caps) + return -ENOMEM; + + err = ixgbe_discover_dev_caps(hw, dev_caps); + if (err) { + NL_SET_ERR_MSG_MOD(extack, + "Unable to read device capabilities"); + kfree(dev_caps); + return -EIO; + } + + *pending = 0; + + if (dev_caps->common_cap.nvm_update_pending_nvm) + *pending |= IXGBE_ACI_NVM_ACTIV_SEL_NVM; + + if (dev_caps->common_cap.nvm_update_pending_orom) + *pending |= IXGBE_ACI_NVM_ACTIV_SEL_OROM; + + if (dev_caps->common_cap.nvm_update_pending_netlist) + *pending |= IXGBE_ACI_NVM_ACTIV_SEL_NETLIST; + + kfree(dev_caps); + + return 0; +} + +/** + * ixgbe_cancel_pending_update - Cancel any pending update for a component + * @adapter: the PF driver structure + * @component: if not NULL, the name of the component being updated + * @extack: Netlink extended ACK structure + * + * Cancel any pending update for the specified component. If component is + * NULL, all device updates will be canceled. + * + * Return: 0 on success, or a negative error code on failure. + */ +static int ixgbe_cancel_pending_update(struct ixgbe_adapter *adapter, + const char *component, + struct netlink_ext_ack *extack) +{ + struct devlink *devlink = adapter->devlink; + struct ixgbe_hw *hw = &adapter->hw; + u8 pending; + int err; + + err = ixgbe_get_pending_updates(adapter, &pending, extack); + if (err) + return err; + + /* If the flash_update request is for a specific component, ignore all + * of the other components. 
+ */ + if (component) { + if (strcmp(component, "fw.mgmt") == 0) + pending &= IXGBE_ACI_NVM_ACTIV_SEL_NVM; + else if (strcmp(component, "fw.undi") == 0) + pending &= IXGBE_ACI_NVM_ACTIV_SEL_OROM; + else if (strcmp(component, "fw.netlist") == 0) + pending &= IXGBE_ACI_NVM_ACTIV_SEL_NETLIST; + else + return -EINVAL; + } + + /* There is no previous pending update, so this request may continue */ + if (!pending) + return 0; + + /* In order to allow overwriting a previous pending update, notify + * firmware to cancel that update by issuing the appropriate command. + */ + devlink_flash_update_status_notify(devlink, + "Canceling previous pending update", + component, 0, 0); + + err = ixgbe_acquire_nvm(hw, IXGBE_RES_WRITE); + if (err) { + NL_SET_ERR_MSG_MOD(extack, + "Failed to acquire device flash lock"); + return -EIO; + } + + pending |= IXGBE_ACI_NVM_REVERT_LAST_ACTIV; + err = ixgbe_switch_flash_banks(adapter, pending, NULL, extack); + + ixgbe_release_nvm(hw); + + return err; +} + +/** + * ixgbe_flash_pldm_image - Write a PLDM-formatted firmware image to the device + * @devlink: pointer to devlink associated with the device to update + * @params: devlink flash update parameters + * @extack: netlink extended ACK structure + * + * Parse the data for a given firmware file, verifying that it is a valid PLDM + * formatted image that matches this device. + * + * Extract the device record Package Data and Component Tables and send them + * to the firmware. Extract and write the flash data for each of the three + * main flash components, "fw.mgmt", "fw.undi", and "fw.netlist". Notify + * firmware once the data is written to the inactive banks. + * + * Return: 0 on success or a negative error code on failure. + */ +int ixgbe_flash_pldm_image(struct devlink *devlink, + struct devlink_flash_update_params *params, + struct netlink_ext_ack *extack) +{ + struct ixgbe_adapter *adapter = devlink_priv(devlink); + struct device *dev = &adapter->pdev->dev; + struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_fwu_priv priv; + u8 preservation; + int err; + + if (hw->mac.type != ixgbe_mac_e610) + return -EOPNOTSUPP; + + switch (params->overwrite_mask) { + case 0: + /* preserve all settings and identifiers */ + preservation = IXGBE_ACI_NVM_PRESERVE_ALL; + break; + case DEVLINK_FLASH_OVERWRITE_SETTINGS: + /* Overwrite settings, but preserve vital information such as + * device identifiers. 
+ */ + preservation = IXGBE_ACI_NVM_PRESERVE_SELECTED; + break; + case (DEVLINK_FLASH_OVERWRITE_SETTINGS | + DEVLINK_FLASH_OVERWRITE_IDENTIFIERS): + /* overwrite both settings and identifiers, preserve nothing */ + preservation = IXGBE_ACI_NVM_NO_PRESERVATION; + break; + default: + NL_SET_ERR_MSG_MOD(extack, + "Requested overwrite mask is not supported"); + return -EOPNOTSUPP; + } + + /* Cannot get caps in recovery mode, so lack of nvm_unified_update bit + * cannot lead to error + */ + if (!hw->dev_caps.common_cap.nvm_unified_update && + (hw->mac.ops.fw_recovery_mode && + !hw->mac.ops.fw_recovery_mode(hw))) { + NL_SET_ERR_MSG_MOD(extack, + "Current firmware does not support unified update"); + return -EOPNOTSUPP; + } + + memset(&priv, 0, sizeof(priv)); + + priv.context.ops = &ixgbe_fwu_ops_e610; + priv.context.dev = dev; + priv.extack = extack; + priv.adapter = adapter; + priv.activate_flags = preservation; + + devlink_flash_update_status_notify(devlink, + "Preparing to flash", NULL, 0, 0); + + err = ixgbe_cancel_pending_update(adapter, NULL, extack); + if (err) + return err; + + err = ixgbe_acquire_nvm(hw, IXGBE_RES_WRITE); + if (err) { + NL_SET_ERR_MSG_MOD(extack, + "Failed to acquire device flash lock"); + return -EIO; + } + + err = pldmfw_flash_image(&priv.context, params->fw); + if (err == -ENOENT) { + NL_SET_ERR_MSG_MOD(extack, + "Firmware image has no record matching this device"); + } else if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to flash PLDM image"); + } + + ixgbe_release_nvm(hw); + + return err; +} diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.h new file mode 100644 index 000000000000..abdd708c93df --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2025 Intel Corporation. 
*/ + +#ifndef _IXGBE_FW_UPDATE_H_ +#define _IXGBE_FW_UPDATE_H_ + +int ixgbe_flash_pldm_image(struct devlink *devlink, + struct devlink_flash_update_params *params, + struct netlink_ext_ack *extack); +int ixgbe_get_pending_updates(struct ixgbe_adapter *adapter, u8 *pending, + struct netlink_ext_ack *extack); +#endif diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c index 07ea1954a276..d1f4073b36f9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c @@ -9,7 +9,7 @@ #define IXGBE_IPSEC_KEY_BITS 160 static const char aes_gcm_name[] = "rfc4106(gcm(aes))"; -static void ixgbe_ipsec_del_sa(struct xfrm_state *xs); +static void ixgbe_ipsec_del_sa(struct net_device *dev, struct xfrm_state *xs); /** * ixgbe_ipsec_set_tx_sa - set the Tx SA registers @@ -321,7 +321,7 @@ void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter) if (r->used) { if (r->mode & IXGBE_RXTXMOD_VF) - ixgbe_ipsec_del_sa(r->xs); + ixgbe_ipsec_del_sa(adapter->netdev, r->xs); else ixgbe_ipsec_set_rx_sa(hw, i, r->xs->id.spi, r->key, r->salt, @@ -330,7 +330,7 @@ void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter) if (t->used) { if (t->mode & IXGBE_RXTXMOD_VF) - ixgbe_ipsec_del_sa(t->xs); + ixgbe_ipsec_del_sa(adapter->netdev, t->xs); else ixgbe_ipsec_set_tx_sa(hw, i, t->key, t->salt); } @@ -417,6 +417,7 @@ static struct xfrm_state *ixgbe_ipsec_find_rx_state(struct ixgbe_ipsec *ipsec, /** * ixgbe_ipsec_parse_proto_keys - find the key and salt based on the protocol + * @dev: pointer to net device * @xs: pointer to xfrm_state struct * @mykey: pointer to key array to populate * @mysalt: pointer to salt value to populate @@ -424,10 +425,10 @@ static struct xfrm_state *ixgbe_ipsec_find_rx_state(struct ixgbe_ipsec *ipsec, * This copies the protocol keys and salt to our own data tables. The * 82599 family only supports the one algorithm. 
**/ -static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs, +static int ixgbe_ipsec_parse_proto_keys(struct net_device *dev, + struct xfrm_state *xs, u32 *mykey, u32 *mysalt) { - struct net_device *dev = xs->xso.real_dev; unsigned char *key_data; char *alg_name = NULL; int key_len; @@ -473,12 +474,13 @@ static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs, /** * ixgbe_ipsec_check_mgmt_ip - make sure there is no clash with mgmt IP filters + * @dev: pointer to net device * @xs: pointer to transformer state struct **/ -static int ixgbe_ipsec_check_mgmt_ip(struct xfrm_state *xs) +static int ixgbe_ipsec_check_mgmt_ip(struct net_device *dev, + struct xfrm_state *xs) { - struct net_device *dev = xs->xso.real_dev; - struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev); struct ixgbe_hw *hw = &adapter->hw; u32 mfval, manc, reg; int num_filters = 4; @@ -556,14 +558,15 @@ static int ixgbe_ipsec_check_mgmt_ip(struct xfrm_state *xs) /** * ixgbe_ipsec_add_sa - program device with a security association + * @dev: pointer to device to program * @xs: pointer to transformer state struct * @extack: extack point to fill failure reason **/ -static int ixgbe_ipsec_add_sa(struct xfrm_state *xs, +static int ixgbe_ipsec_add_sa(struct net_device *dev, + struct xfrm_state *xs, struct netlink_ext_ack *extack) { - struct net_device *dev = xs->xso.real_dev; - struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev); struct ixgbe_ipsec *ipsec = adapter->ipsec; struct ixgbe_hw *hw = &adapter->hw; int checked, match, first; @@ -581,7 +584,7 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs, return -EINVAL; } - if (ixgbe_ipsec_check_mgmt_ip(xs)) { + if (ixgbe_ipsec_check_mgmt_ip(dev, xs)) { NL_SET_ERR_MSG_MOD(extack, "IPsec IP addr clash with mgmt filters"); return -EINVAL; } @@ -615,7 +618,7 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs, rsa.decrypt = xs->ealg || xs->aead; /* get the key and salt */ - ret = ixgbe_ipsec_parse_proto_keys(xs, rsa.key, &rsa.salt); + ret = ixgbe_ipsec_parse_proto_keys(dev, xs, rsa.key, &rsa.salt); if (ret) { NL_SET_ERR_MSG_MOD(extack, "Failed to get key data for Rx SA table"); return ret; @@ -724,7 +727,7 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs, if (xs->id.proto & IPPROTO_ESP) tsa.encrypt = xs->ealg || xs->aead; - ret = ixgbe_ipsec_parse_proto_keys(xs, tsa.key, &tsa.salt); + ret = ixgbe_ipsec_parse_proto_keys(dev, xs, tsa.key, &tsa.salt); if (ret) { NL_SET_ERR_MSG_MOD(extack, "Failed to get key data for Tx SA table"); memset(&tsa, 0, sizeof(tsa)); @@ -752,12 +755,12 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs, /** * ixgbe_ipsec_del_sa - clear out this specific SA + * @dev: pointer to device to program * @xs: pointer to transformer state struct **/ -static void ixgbe_ipsec_del_sa(struct xfrm_state *xs) +static void ixgbe_ipsec_del_sa(struct net_device *dev, struct xfrm_state *xs) { - struct net_device *dev = xs->xso.real_dev; - struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev); struct ixgbe_ipsec *ipsec = adapter->ipsec; struct ixgbe_hw *hw = &adapter->hw; u32 zerobuf[4] = {0, 0, 0, 0}; @@ -841,7 +844,8 @@ void ixgbe_ipsec_vf_clear(struct ixgbe_adapter *adapter, u32 vf) continue; if (ipsec->rx_tbl[i].mode & IXGBE_RXTXMOD_VF && ipsec->rx_tbl[i].vf == vf) - ixgbe_ipsec_del_sa(ipsec->rx_tbl[i].xs); + ixgbe_ipsec_del_sa(adapter->netdev, + ipsec->rx_tbl[i].xs); } /* search tx sa table 
*/ @@ -850,7 +854,8 @@ void ixgbe_ipsec_vf_clear(struct ixgbe_adapter *adapter, u32 vf) continue; if (ipsec->tx_tbl[i].mode & IXGBE_RXTXMOD_VF && ipsec->tx_tbl[i].vf == vf) - ixgbe_ipsec_del_sa(ipsec->tx_tbl[i].xs); + ixgbe_ipsec_del_sa(adapter->netdev, + ipsec->tx_tbl[i].xs); } } @@ -930,7 +935,7 @@ int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) memcpy(xs->aead->alg_name, aes_gcm_name, sizeof(aes_gcm_name)); /* set up the HW offload */ - err = ixgbe_ipsec_add_sa(xs, NULL); + err = ixgbe_ipsec_add_sa(adapter->netdev, xs, NULL); if (err) goto err_aead; @@ -1034,7 +1039,7 @@ int ixgbe_ipsec_vf_del_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) xs = ipsec->tx_tbl[sa_idx].xs; } - ixgbe_ipsec_del_sa(xs); + ixgbe_ipsec_del_sa(adapter->netdev, xs); /* remove the xs that was made-up in the add request */ kfree_sensitive(xs); @@ -1052,7 +1057,7 @@ int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first, struct ixgbe_ipsec_tx_data *itd) { - struct ixgbe_adapter *adapter = netdev_priv(tx_ring->netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(tx_ring->netdev); struct ixgbe_ipsec *ipsec = adapter->ipsec; struct xfrm_state *xs; struct sec_path *sp; @@ -1142,7 +1147,7 @@ void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) { - struct ixgbe_adapter *adapter = netdev_priv(rx_ring->netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(rx_ring->netdev); __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; __le16 ipsec_pkt_types = cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH | IXGBE_RXDADV_PKTTYPE_IPSEC_ESP); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index a2718218963e..03d31e5b131d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -49,6 +49,7 @@ #include "ixgbe_sriov.h" #include "ixgbe_model.h" #include "ixgbe_txrx_common.h" +#include "devlink/devlink.h" char ixgbe_driver_name[] = "ixgbe"; static const char ixgbe_driver_string[] = @@ -1095,7 +1096,7 @@ static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter) static int ixgbe_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); struct ixgbe_hw *hw = &adapter->hw; u32 bcnrc_val = ixgbe_link_mbps(adapter); @@ -4678,7 +4679,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) static int ixgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); struct ixgbe_hw *hw = &adapter->hw; /* add VID to filter table */ @@ -4737,7 +4738,7 @@ void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid) static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); struct ixgbe_hw *hw = &adapter->hw; /* remove VID from filter table */ @@ -4962,7 +4963,7 @@ static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter) **/ static int ixgbe_write_mc_addr_list(struct net_device *netdev) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); struct ixgbe_hw *hw = &adapter->hw; if (!netif_running(netdev)) @@ -5138,7 +5139,7 @@ 
int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, static int ixgbe_uc_sync(struct net_device *netdev, const unsigned char *addr) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); int ret; ret = ixgbe_add_mac_filter(adapter, addr, VMDQ_P(0)); @@ -5148,7 +5149,7 @@ static int ixgbe_uc_sync(struct net_device *netdev, const unsigned char *addr) static int ixgbe_uc_unsync(struct net_device *netdev, const unsigned char *addr) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); ixgbe_del_mac_filter(adapter, addr, VMDQ_P(0)); @@ -5166,7 +5167,7 @@ static int ixgbe_uc_unsync(struct net_device *netdev, const unsigned char *addr) **/ void ixgbe_set_rx_mode(struct net_device *netdev) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); struct ixgbe_hw *hw = &adapter->hw; u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE; netdev_features_t features = netdev->features; @@ -5268,7 +5269,7 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter) static int ixgbe_udp_tunnel_sync(struct net_device *dev, unsigned int table) { - struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev); struct ixgbe_hw *hw = &adapter->hw; struct udp_tunnel_info ti; @@ -6600,7 +6601,7 @@ static void ixgbe_set_eee_capable(struct ixgbe_adapter *adapter) **/ static void ixgbe_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); /* Do the reset outside of interrupt context */ ixgbe_tx_timeout_reset(adapter); @@ -6849,7 +6850,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter, adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK; /* initialize eeprom parameters */ - if (ixgbe_init_eeprom_params_generic(hw)) { + if (hw->eeprom.ops.init_params(hw)) { e_dev_err("EEPROM initialization failed\n"); return -EIO; } @@ -7165,7 +7166,7 @@ static int ixgbe_max_xdp_frame_size(struct ixgbe_adapter *adapter) **/ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); if (ixgbe_enabled_xdp_adapter(adapter)) { int new_frame_size = new_mtu + IXGBE_PKT_HDR_PAD; @@ -7212,7 +7213,7 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) **/ int ixgbe_open(struct net_device *netdev) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); struct ixgbe_hw *hw = &adapter->hw; int err, queues; @@ -7316,7 +7317,7 @@ static void ixgbe_close_suspend(struct ixgbe_adapter *adapter) **/ int ixgbe_close(struct net_device *netdev) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); ixgbe_ptp_stop(adapter); @@ -8364,6 +8365,34 @@ static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter) rtnl_unlock(); } +static int ixgbe_check_fw_api_mismatch(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + + if (hw->mac.type != ixgbe_mac_e610) + return 0; + + if (hw->mac.ops.get_fw_ver && hw->mac.ops.get_fw_ver(hw)) + return 0; + + if (hw->api_maj_ver > IXGBE_FW_API_VER_MAJOR) { + e_dev_err("The driver for the device stopped because the NVM image is newer than expected. 
You must install the most recent version of the network driver.\n"); + + adapter->flags2 |= IXGBE_FLAG2_API_MISMATCH; + return -EOPNOTSUPP; + } else if (hw->api_maj_ver == IXGBE_FW_API_VER_MAJOR && + hw->api_min_ver > IXGBE_FW_API_VER_MINOR + IXGBE_FW_API_VER_DIFF_ALLOWED) { + e_dev_info("The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n"); + adapter->flags2 |= IXGBE_FLAG2_API_MISMATCH; + } else if (hw->api_maj_ver < IXGBE_FW_API_VER_MAJOR || + hw->api_min_ver < IXGBE_FW_API_VER_MINOR - IXGBE_FW_API_VER_DIFF_ALLOWED) { + e_dev_info("The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n"); + adapter->flags2 |= IXGBE_FLAG2_API_MISMATCH; + } + + return 0; +} + /** * ixgbe_check_fw_error - Check firmware for errors * @adapter: the adapter private structure @@ -8374,12 +8403,14 @@ static bool ixgbe_check_fw_error(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u32 fwsm; + int err; /* read fwsm.ext_err_ind register and log errors */ fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw)); + /* skip if E610's FW is reloading, warning in that case may be misleading */ if (fwsm & IXGBE_FWSM_EXT_ERR_IND_MASK || - !(fwsm & IXGBE_FWSM_FW_VAL_BIT)) + (!(fwsm & IXGBE_FWSM_FW_VAL_BIT) && !(hw->mac.type == ixgbe_mac_e610))) e_dev_warn("Warning firmware error detected FWSM: 0x%08X\n", fwsm); @@ -8387,10 +8418,53 @@ e_dev_err("Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n"); return true; } + if (!(adapter->flags2 & IXGBE_FLAG2_API_MISMATCH)) { + err = ixgbe_check_fw_api_mismatch(adapter); + if (err) + return true; + } + + /* return here if FW rollback mode has already been detected */ + if (adapter->flags2 & IXGBE_FLAG2_FW_ROLLBACK) + return false; + + if (hw->mac.ops.fw_rollback_mode && hw->mac.ops.fw_rollback_mode(hw)) { + struct ixgbe_nvm_info *nvm_info = &adapter->hw.flash.nvm; + char ver_buff[64] = ""; + + if (hw->mac.ops.get_fw_ver && hw->mac.ops.get_fw_ver(hw)) + goto no_version; + + if (hw->mac.ops.get_nvm_ver && + hw->mac.ops.get_nvm_ver(hw, nvm_info)) + goto no_version; + + snprintf(ver_buff, sizeof(ver_buff), + "Current version is NVM:%x.%x.%x, FW:%d.%d. ", + nvm_info->major, nvm_info->minor, nvm_info->eetrack, + hw->fw_maj_ver, hw->fw_min_ver); +no_version: + e_dev_warn("Firmware rollback mode detected. %sDevice may exhibit limited functionality.
Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware rollback mode.", + ver_buff); + + adapter->flags2 |= IXGBE_FLAG2_FW_ROLLBACK; + } return false; } +static void ixgbe_recovery_service_task(struct work_struct *work) +{ + struct ixgbe_adapter *adapter = container_of(work, + struct ixgbe_adapter, + service_task); + + ixgbe_handle_fw_event(adapter); + ixgbe_service_event_complete(adapter); + + mod_timer(&adapter->service_timer, jiffies + msecs_to_jiffies(100)); +} + /** * ixgbe_service_task - manages and runs subtasks * @work: pointer to work_struct containing our data @@ -8410,8 +8484,13 @@ static void ixgbe_service_task(struct work_struct *work) return; } if (ixgbe_check_fw_error(adapter)) { - if (!test_bit(__IXGBE_DOWN, &adapter->state)) + if (!test_bit(__IXGBE_DOWN, &adapter->state)) { + if (adapter->mii_bus) { + mdiobus_unregister(adapter->mii_bus); + adapter->mii_bus = NULL; + } unregister_netdev(adapter->netdev); + } ixgbe_service_event_complete(adapter); return; } @@ -9001,7 +9080,7 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, switch (vlan_get_protocol(skb)) { case htons(ETH_P_FCOE): case htons(ETH_P_FIP): - adapter = netdev_priv(dev); + adapter = ixgbe_from_netdev(dev); if (!sb_dev && (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) break; @@ -9260,7 +9339,7 @@ static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev, struct ixgbe_ring *ring) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); struct ixgbe_ring *tx_ring; /* @@ -9292,7 +9371,7 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, **/ static int ixgbe_set_mac(struct net_device *netdev, void *p) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); struct ixgbe_hw *hw = &adapter->hw; struct sockaddr *addr = p; @@ -9310,7 +9389,7 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p) static int ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); struct ixgbe_hw *hw = &adapter->hw; u16 value; int rc; @@ -9336,7 +9415,7 @@ ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr) static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad, u16 addr, u16 value) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); struct ixgbe_hw *hw = &adapter->hw; if (adapter->mii_bus) { @@ -9356,7 +9435,7 @@ static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad, static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); switch (cmd) { case SIOCSHWTSTAMP: @@ -9382,7 +9461,7 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) static int ixgbe_add_sanmac_netdev(struct net_device *dev) { int err = 0; - struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev); struct ixgbe_hw *hw = &adapter->hw; if (is_valid_ether_addr(hw->mac.san_addr)) { @@ -9406,7 +9485,7 @@ static int ixgbe_add_sanmac_netdev(struct net_device *dev) static int ixgbe_del_sanmac_netdev(struct net_device *dev) { int err = 0; - struct ixgbe_adapter *adapter = netdev_priv(dev); 
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev); struct ixgbe_mac_info *mac = &adapter->hw.mac; if (is_valid_ether_addr(mac->san_addr)) { @@ -9437,7 +9516,7 @@ static void ixgbe_get_ring_stats64(struct rtnl_link_stats64 *stats, static void ixgbe_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); int i; rcu_read_lock(); @@ -9480,7 +9559,7 @@ static void ixgbe_get_stats64(struct net_device *netdev, static int ixgbe_ndo_get_vf_stats(struct net_device *netdev, int vf, struct ifla_vf_stats *vf_stats) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); if (vf < 0 || vf >= adapter->num_vfs) return -EINVAL; @@ -9597,7 +9676,7 @@ static int ixgbe_reassign_macvlan_pool(struct net_device *vdev, static void ixgbe_defrag_macvlan_pools(struct net_device *dev) { - struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev); struct netdev_nested_priv priv = { .data = (void *)adapter, }; @@ -9618,7 +9697,7 @@ static void ixgbe_defrag_macvlan_pools(struct net_device *dev) */ int ixgbe_setup_tc(struct net_device *dev, u8 tc) { - struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev); struct ixgbe_hw *hw = &adapter->hw; /* Hardware supports up to 8 traffic classes */ @@ -10176,7 +10255,7 @@ static LIST_HEAD(ixgbe_block_cb_list); static int __ixgbe_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) { - struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev); switch (type) { case TC_SETUP_BLOCK: @@ -10204,7 +10283,7 @@ void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter) #endif void ixgbe_do_reset(struct net_device *netdev) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); if (netif_running(netdev)) ixgbe_reinit_locked(adapter); @@ -10215,7 +10294,7 @@ void ixgbe_do_reset(struct net_device *netdev) static netdev_features_t ixgbe_fix_features(struct net_device *netdev, netdev_features_t features) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); /* If Rx checksum is disabled, then RSC/LRO should also be disabled */ if (!(features & NETIF_F_RXCSUM)) @@ -10252,7 +10331,7 @@ static void ixgbe_reset_l2fw_offload(struct ixgbe_adapter *adapter) static int ixgbe_set_features(struct net_device *netdev, netdev_features_t features) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); netdev_features_t changed = netdev->features ^ features; bool need_reset = false; @@ -10328,7 +10407,7 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], { /* guarantee we can provide a unique filter for the unicast address */ if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) { - struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev); u16 pool = VMDQ_P(0); if (netdev_uc_count(dev) >= ixgbe_available_rars(adapter, pool)) @@ -10416,7 +10495,7 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags, struct netlink_ext_ack *extack) { - struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_adapter *adapter = 
ixgbe_from_netdev(dev); struct nlattr *attr, *br_spec; int rem; @@ -10444,7 +10523,7 @@ static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct net_device *dev, u32 filter_mask, int nlflags) { - struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev); if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) return 0; @@ -10456,7 +10535,7 @@ static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev) { - struct ixgbe_adapter *adapter = netdev_priv(pdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(pdev); struct ixgbe_fwd_adapter *accel; int tcs = adapter->hw_tcs ? : 1; int pool, err; @@ -10553,7 +10632,7 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev) static void ixgbe_fwd_del(struct net_device *pdev, void *priv) { struct ixgbe_fwd_adapter *accel = priv; - struct ixgbe_adapter *adapter = netdev_priv(pdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(pdev); unsigned int rxbase = accel->rx_base_queue; unsigned int i; @@ -10631,7 +10710,7 @@ ixgbe_features_check(struct sk_buff *skb, struct net_device *dev, static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog) { int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; - struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev); struct bpf_prog *old_prog; bool need_reset; int num_queues; @@ -10703,7 +10782,7 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog) static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp) { - struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev); switch (xdp->command) { case XDP_SETUP_PROG: @@ -10738,7 +10817,7 @@ void ixgbe_xdp_ring_update_tail_locked(struct ixgbe_ring *ring) static int ixgbe_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, u32 flags) { - struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev); struct ixgbe_ring *ring; int nxmit = 0; int i; @@ -11146,7 +11225,7 @@ bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, * format to display. The FW version is taken from the EEPROM/NVM. * */ -static void ixgbe_set_fw_version_e610(struct ixgbe_adapter *adapter) +void ixgbe_set_fw_version_e610(struct ixgbe_adapter *adapter) { struct ixgbe_orom_info *orom = &adapter->hw.flash.orom; struct ixgbe_nvm_info *nvm = &adapter->hw.flash.nvm; @@ -11197,6 +11276,66 @@ static void ixgbe_set_fw_version(struct ixgbe_adapter *adapter) } /** + * ixgbe_recovery_probe - Handle FW recovery mode during probe + * @adapter: the adapter private structure + * + * Perform limited driver initialization when FW error is detected. + * + * Return: 0 on successful probe for E610, -EIO if recovery mode is detected + * for non-E610 adapter, error status code on any other case. 
+ */ +static int ixgbe_recovery_probe(struct ixgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct ixgbe_hw *hw = &adapter->hw; + bool disable_dev; + int err = -EIO; + + if (hw->mac.type != ixgbe_mac_e610) + goto clean_up_probe; + + ixgbe_get_hw_control(adapter); + mutex_init(&hw->aci.lock); + err = ixgbe_get_flash_data(&adapter->hw); + if (err) + goto shutdown_aci; + + timer_setup(&adapter->service_timer, ixgbe_service_timer, 0); + INIT_WORK(&adapter->service_task, ixgbe_recovery_service_task); + set_bit(__IXGBE_SERVICE_INITED, &adapter->state); + clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state); + + if (hw->mac.ops.get_bus_info) + hw->mac.ops.get_bus_info(hw); + + pci_set_drvdata(pdev, adapter); + /* We are creating devlink interface so NIC can be managed, + * e.g. new NVM image loaded + */ + devl_lock(adapter->devlink); + ixgbe_devlink_register_port(adapter); + SET_NETDEV_DEVLINK_PORT(adapter->netdev, + &adapter->devlink_port); + ixgbe_devlink_init_regions(adapter); + devl_register(adapter->devlink); + devl_unlock(adapter->devlink); + + return 0; +shutdown_aci: + mutex_destroy(&adapter->hw.aci.lock); + ixgbe_release_hw_control(adapter); + devlink_free(adapter->devlink); +clean_up_probe: + disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); + free_netdev(netdev); + pci_release_mem_regions(pdev); + if (disable_dev) + pci_disable_device(pdev); + return err; +} + +/** * ixgbe_probe - Device Initialization Routine * @pdev: PCI device information struct * @ent: entry in ixgbe_pci_tbl @@ -11210,6 +11349,7 @@ static void ixgbe_set_fw_version(struct ixgbe_adapter *adapter) static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *netdev; + struct ixgbe_netdevice_priv *netdev_priv_wrapper; struct ixgbe_adapter *adapter = NULL; struct ixgbe_hw *hw; const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data]; @@ -11263,7 +11403,13 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) indices = IXGBE_MAX_RSS_INDICES_X550; } - netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices); + adapter = ixgbe_allocate_devlink(&pdev->dev); + if (IS_ERR(adapter)) { + err = PTR_ERR(adapter); + goto err_devlink; + } + + netdev = alloc_etherdev_mq(sizeof(*netdev_priv_wrapper), indices); if (!netdev) { err = -ENOMEM; goto err_alloc_etherdev; @@ -11271,7 +11417,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) SET_NETDEV_DEV(netdev, &pdev->dev); - adapter = netdev_priv(netdev); + netdev_priv_wrapper = netdev_priv(netdev); + netdev_priv_wrapper->adapter = adapter; adapter->netdev = netdev; adapter->pdev = pdev; @@ -11287,11 +11434,6 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_ioremap; } - netdev->netdev_ops = &ixgbe_netdev_ops; - ixgbe_set_ethtool_ops(netdev); - netdev->watchdog_timeo = 5 * HZ; - strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); - /* Setup hw api */ hw->mac.ops = *ii->mac_ops; hw->mac.type = ii->mac; @@ -11321,15 +11463,31 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw->phy.mdio.mdio_read = ixgbe_mdio_read; hw->phy.mdio.mdio_write = ixgbe_mdio_write; + netdev->netdev_ops = &ixgbe_netdev_ops; + ixgbe_set_ethtool_ops(netdev); + netdev->watchdog_timeo = 5 * HZ; + strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); + /* setup the private structure */ err = ixgbe_sw_init(adapter, ii); if (err) goto 
err_sw_init; + /* Make sure the SWFW semaphore is in a valid state */ + if (hw->mac.ops.init_swfw_sync) + hw->mac.ops.init_swfw_sync(hw); + + if (ixgbe_check_fw_error(adapter)) + return ixgbe_recovery_probe(adapter); + if (adapter->hw.mac.type == ixgbe_mac_e610) { err = ixgbe_get_caps(&adapter->hw); if (err) dev_err(&pdev->dev, "ixgbe_get_caps failed %d\n", err); + + err = ixgbe_get_flash_data(&adapter->hw); + if (err) + goto err_sw_init; } if (adapter->hw.mac.type == ixgbe_mac_82599EB) @@ -11348,10 +11506,6 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) break; } - /* Make sure the SWFW semaphore is in a valid state */ - if (hw->mac.ops.init_swfw_sync) - hw->mac.ops.init_swfw_sync(hw); - /* Make it possible the adapter to be woken up via WOL */ switch (adapter->hw.mac.type) { case ixgbe_mac_82599EB: @@ -11504,11 +11658,6 @@ skip_sriov: if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) netdev->features |= NETIF_F_LRO; - if (ixgbe_check_fw_error(adapter)) { - err = -EIO; - goto err_sw_init; - } - /* make sure the EEPROM is good */ if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) { e_dev_err("The EEPROM Checksum Is Not Valid\n"); @@ -11591,7 +11740,7 @@ skip_sriov: if (expected_gts > 0) ixgbe_check_minimum_link(adapter, expected_gts); - err = ixgbe_read_pba_string_generic(hw, part_str, sizeof(part_str)); + err = hw->eeprom.ops.read_pba_string(hw, part_str, sizeof(part_str)); if (err) strscpy(part_str, "Unknown", sizeof(part_str)); if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present) @@ -11617,6 +11766,11 @@ skip_sriov: } strcpy(netdev->name, "eth%d"); pci_set_drvdata(pdev, adapter); + + devl_lock(adapter->devlink); + ixgbe_devlink_register_port(adapter); + SET_NETDEV_DEVLINK_PORT(adapter->netdev, &adapter->devlink_port); + err = register_netdev(netdev); if (err) goto err_register; @@ -11671,11 +11825,16 @@ skip_sriov: if (err) goto err_netdev; + ixgbe_devlink_init_regions(adapter); + devl_register(adapter->devlink); + devl_unlock(adapter->devlink); return 0; err_netdev: unregister_netdev(netdev); err_register: + devl_port_unregister(&adapter->devlink_port); + devl_unlock(adapter->devlink); ixgbe_release_hw_control(adapter); ixgbe_clear_interrupt_scheme(adapter); if (hw->mac.type == ixgbe_mac_e610) @@ -11692,7 +11851,9 @@ err_ioremap: disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); free_netdev(netdev); err_alloc_etherdev: + devlink_free(adapter->devlink); pci_release_mem_regions(pdev); +err_devlink: err_pci_reg: err_dma: if (!adapter || disable_dev) @@ -11721,6 +11882,9 @@ static void ixgbe_remove(struct pci_dev *pdev) return; netdev = adapter->netdev; + devl_lock(adapter->devlink); + devl_unregister(adapter->devlink); + ixgbe_devlink_destroy_regions(adapter); ixgbe_dbg_adapter_exit(adapter); set_bit(__IXGBE_REMOVING, &adapter->state); @@ -11756,6 +11920,10 @@ static void ixgbe_remove(struct pci_dev *pdev) if (netdev->reg_state == NETREG_REGISTERED) unregister_netdev(netdev); + devl_port_unregister(&adapter->devlink_port); + devl_unlock(adapter->devlink); + devlink_free(adapter->devlink); + ixgbe_stop_ipsec_offload(adapter); ixgbe_clear_interrupt_scheme(adapter); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index 0a03a8bb5f88..2d54828bdfbb 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -167,7 +167,7 @@ int ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 val, bool 
lock) { u32 swfw_mask = hw->phy.phy_semaphore_mask; - int max_retry = 1; + int max_retry = 3; int retry = 0; u8 reg_high; u8 csum; @@ -2285,7 +2285,7 @@ static int ixgbe_write_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 data, bool lock) { u32 swfw_mask = hw->phy.phy_semaphore_mask; - u32 max_retry = 1; + u32 max_retry = 3; u32 retry = 0; int status; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index ccdce80edd14..0dbbd2befd4d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -1418,7 +1418,7 @@ void ixgbe_set_all_vfs(struct ixgbe_adapter *adapter) int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); int retval; if (vf >= adapter->num_vfs) @@ -1526,7 +1526,7 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos, __be16 vlan_proto) { int err = 0; - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7)) return -EINVAL; @@ -1644,7 +1644,7 @@ void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter) int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate, int max_tx_rate) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); int link_speed; /* verify VF is active */ @@ -1679,7 +1679,7 @@ int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate, int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); struct ixgbe_hw *hw = &adapter->hw; if (vf >= adapter->num_vfs) @@ -1757,7 +1757,7 @@ void ixgbe_set_vf_link_state(struct ixgbe_adapter *adapter, int vf, int state) **/ int ixgbe_ndo_set_vf_link_state(struct net_device *netdev, int vf, int state) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); int ret = 0; if (vf < 0 || vf >= adapter->num_vfs) { @@ -1794,7 +1794,7 @@ int ixgbe_ndo_set_vf_link_state(struct net_device *netdev, int vf, int state) int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf, bool setting) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); /* This operation is currently supported only for 82599 and x540 * devices. 
@@ -1813,7 +1813,7 @@ int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf, int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); if (vf >= adapter->num_vfs) return -EINVAL; @@ -1836,7 +1836,7 @@ int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting) int ixgbe_ndo_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *ivi) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); if (vf >= adapter->num_vfs) return -EINVAL; ivi->vf = vf; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 5fdf32d79d82..892fa6c1f879 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -3446,6 +3446,8 @@ struct ixgbe_eeprom_operations { int (*validate_checksum)(struct ixgbe_hw *, u16 *); int (*update_checksum)(struct ixgbe_hw *); int (*calc_checksum)(struct ixgbe_hw *); + int (*read_pba_string)(struct ixgbe_hw *hw, u8 *pba_num, + u32 pba_num_size); }; struct ixgbe_mac_operations { @@ -3454,6 +3456,7 @@ struct ixgbe_mac_operations { int (*start_hw)(struct ixgbe_hw *); int (*clear_hw_cntrs)(struct ixgbe_hw *); enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *); + int (*get_fw_ver)(struct ixgbe_hw *hw); int (*get_mac_addr)(struct ixgbe_hw *, u8 *); int (*get_san_mac_addr)(struct ixgbe_hw *, u8 *); int (*get_device_caps)(struct ixgbe_hw *, u16 *); @@ -3522,6 +3525,8 @@ struct ixgbe_mac_operations { int (*get_thermal_sensor_data)(struct ixgbe_hw *); int (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw); bool (*fw_recovery_mode)(struct ixgbe_hw *hw); + bool (*fw_rollback_mode)(struct ixgbe_hw *hw); + int (*get_nvm_ver)(struct ixgbe_hw *hw, struct ixgbe_nvm_info *nvm); void (*disable_rx)(struct ixgbe_hw *hw); void (*enable_rx)(struct ixgbe_hw *hw); void (*set_source_address_pruning)(struct ixgbe_hw *, bool, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h index 617e07878e4f..09df67f03cf4 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h @@ -10,11 +10,75 @@ #define IXGBE_MAX_VSI 768 /* Checksum and Shadow RAM pointers */ -#define E610_SR_SW_CHECKSUM_WORD 0x3F +#define IXGBE_E610_SR_NVM_CTRL_WORD 0x00 +#define IXGBE_E610_SR_PBA_BLOCK_PTR 0x16 +#define IXGBE_E610_SR_PBA_BLOCK_MASK GENMASK(15, 8) +#define IXGBE_E610_SR_NVM_DEV_STARTER_VER 0x18 +#define IXGBE_E610_SR_NVM_EETRACK_LO 0x2D +#define IXGBE_E610_SR_NVM_EETRACK_HI 0x2E +#define IXGBE_E610_NVM_VER_LO_MASK GENMASK(7, 0) +#define IXGBE_E610_NVM_VER_HI_MASK GENMASK(15, 12) +#define IXGBE_E610_SR_SW_CHECKSUM_WORD 0x3F +#define IXGBE_E610_SR_PFA_PTR 0x40 +#define IXGBE_E610_SR_1ST_NVM_BANK_PTR 0x42 +#define IXGBE_E610_SR_NVM_BANK_SIZE 0x43 +#define IXGBE_E610_SR_1ST_OROM_BANK_PTR 0x44 +#define IXGBE_E610_SR_OROM_BANK_SIZE 0x45 +#define IXGBE_E610_SR_NETLIST_BANK_PTR 0x46 +#define IXGBE_E610_SR_NETLIST_BANK_SIZE 0x47 + +/* The OROM version topology */ +#define IXGBE_OROM_VER_PATCH_MASK GENMASK_ULL(7, 0) +#define IXGBE_OROM_VER_BUILD_MASK GENMASK_ULL(23, 8) +#define IXGBE_OROM_VER_MASK GENMASK_ULL(31, 24) + +/* CSS Header words */ +#define IXGBE_NVM_CSS_HDR_LEN_L 0x02 +#define IXGBE_NVM_CSS_HDR_LEN_H 0x03 +#define IXGBE_NVM_CSS_SREV_L 0x14 +#define 
IXGBE_NVM_CSS_SREV_H 0x15 + +#define IXGBE_HDR_LEN_ROUNDUP 32 + +/* Length of Authentication header section in words */ +#define IXGBE_NVM_AUTH_HEADER_LEN 0x08 /* Shadow RAM related */ #define IXGBE_SR_WORDS_IN_1KB 512 +/* The Netlist ID Block is located after all of the Link Topology nodes. */ +#define IXGBE_NETLIST_ID_BLK_SIZE 0x30 +#define IXGBE_NETLIST_ID_BLK_OFFSET(n) IXGBE_NETLIST_LINK_TOPO_OFFSET(0x0004 + 2 * (n)) + +/* netlist ID block field offsets (word offsets) */ +#define IXGBE_NETLIST_ID_BLK_MAJOR_VER_LOW 0x02 +#define IXGBE_NETLIST_ID_BLK_MAJOR_VER_HIGH 0x03 +#define IXGBE_NETLIST_ID_BLK_MINOR_VER_LOW 0x04 +#define IXGBE_NETLIST_ID_BLK_MINOR_VER_HIGH 0x05 +#define IXGBE_NETLIST_ID_BLK_TYPE_LOW 0x06 +#define IXGBE_NETLIST_ID_BLK_TYPE_HIGH 0x07 +#define IXGBE_NETLIST_ID_BLK_REV_LOW 0x08 +#define IXGBE_NETLIST_ID_BLK_REV_HIGH 0x09 +#define IXGBE_NETLIST_ID_BLK_SHA_HASH_WORD(n) (0x0A + (n)) +#define IXGBE_NETLIST_ID_BLK_CUST_VER 0x2F + +/* The Link Topology Netlist section is stored as a series of words. It is + * stored in the NVM as a TLV, with the first two words containing the type + * and length. + */ +#define IXGBE_NETLIST_LINK_TOPO_MOD_ID 0x011B +#define IXGBE_NETLIST_TYPE_OFFSET 0x0000 +#define IXGBE_NETLIST_LEN_OFFSET 0x0001 + +/* The Link Topology section follows the TLV header. When reading the netlist + * using ixgbe_read_netlist_module, we need to account for the 2-word TLV + * header. + */ +#define IXGBE_NETLIST_LINK_TOPO_OFFSET(n) ((n) + 2) +#define IXGBE_LINK_TOPO_MODULE_LEN IXGBE_NETLIST_LINK_TOPO_OFFSET(0x0000) +#define IXGBE_LINK_TOPO_NODE_COUNT IXGBE_NETLIST_LINK_TOPO_OFFSET(0x0001) +#define IXGBE_LINK_TOPO_NODE_COUNT_M GENMASK_ULL(9, 0) + /* Firmware Status Register (GL_FWSTS) */ #define GL_FWSTS 0x00083048 /* Reset Source: POR */ #define GL_FWSTS_EP_PF0 BIT(24) @@ -24,11 +88,23 @@ #define GLNVM_GENS 0x000B6100 /* Reset Source: POR */ #define GLNVM_GENS_SR_SIZE_M GENMASK(7, 5) +#define IXGBE_GL_MNG_FWSM 0x000B6134 /* Reset Source: POR */ +#define IXGBE_GL_MNG_FWSM_RECOVERY_M BIT(1) +#define IXGBE_GL_MNG_FWSM_ROLLBACK_M BIT(2) + /* Flash Access Register */ #define IXGBE_GLNVM_FLA 0x000B6108 /* Reset Source: POR */ #define IXGBE_GLNVM_FLA_LOCKED_S 6 #define IXGBE_GLNVM_FLA_LOCKED_M BIT(6) +/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */ +#define IXGBE_SR_CTRL_WORD_1_M GENMASK(7, 6) +#define IXGBE_SR_CTRL_WORD_VALID BIT(0) +#define IXGBE_SR_CTRL_WORD_OROM_BANK BIT(3) +#define IXGBE_SR_CTRL_WORD_NETLIST_BANK BIT(4) +#define IXGBE_SR_CTRL_WORD_NVM_BANK BIT(5) +#define IXGBE_SR_NVM_PTR_4KB_UNITS BIT(15) + /* Admin Command Interface (ACI) registers */ #define IXGBE_PF_HIDA(_i) (0x00085000 + ((_i) * 4)) #define IXGBE_PF_HIDA_2(_i) (0x00085020 + ((_i) * 4)) @@ -40,6 +116,10 @@ #define IXGBE_PF_HICR_SV BIT(2) #define IXGBE_PF_HICR_EV BIT(3) +#define IXGBE_FW_API_VER_MAJOR 0x01 +#define IXGBE_FW_API_VER_MINOR 0x07 +#define IXGBE_FW_API_VER_DIFF_ALLOWED 0x02 + #define IXGBE_ACI_DESC_SIZE 32 #define IXGBE_ACI_DESC_SIZE_IN_DWORDS (IXGBE_ACI_DESC_SIZE / BYTES_PER_DWORD) @@ -143,6 +223,7 @@ enum ixgbe_aci_opc { ixgbe_aci_opc_write_mdio = 0x06E5, ixgbe_aci_opc_set_gpio_by_func = 0x06E6, ixgbe_aci_opc_get_gpio_by_func = 0x06E7, + ixgbe_aci_opc_set_port_id_led = 0x06E9, ixgbe_aci_opc_set_gpio = 0x06EC, ixgbe_aci_opc_get_gpio = 0x06ED, ixgbe_aci_opc_sff_eeprom = 0x06EE, @@ -728,6 +809,18 @@ struct ixgbe_aci_cmd_get_link_topo_pin { u8 rsvd[7]; }; +/* Set Port Identification LED (direct, 0x06E9) */ +struct ixgbe_aci_cmd_set_port_id_led { + u8 
lport_num; + u8 lport_num_valid; + u8 ident_mode; + u8 rsvd[13]; +}; + +#define IXGBE_ACI_PORT_ID_PORT_NUM_VALID BIT(0) +#define IXGBE_ACI_PORT_IDENT_LED_ORIG 0 +#define IXGBE_ACI_PORT_IDENT_LED_BLINK BIT(0) + /* Read/Write SFF EEPROM command (indirect 0x06EE) */ struct ixgbe_aci_cmd_sff_eeprom { u8 lport_num; @@ -761,6 +854,8 @@ struct ixgbe_aci_cmd_nvm { #define IXGBE_ACI_NVM_MAX_OFFSET 0xFFFFFF __le16 offset_low; u8 offset_high; /* For Write Activate offset_high is used as flags2 */ +#define IXGBE_ACI_NVM_OFFSET_HI_A_MASK GENMASK(15, 8) +#define IXGBE_ACI_NVM_OFFSET_HI_U_MASK GENMASK(23, 16) u8 cmd_flags; #define IXGBE_ACI_NVM_LAST_CMD BIT(0) #define IXGBE_ACI_NVM_PCIR_REQ BIT(0) /* Used by NVM Write reply */ @@ -776,6 +871,9 @@ struct ixgbe_aci_cmd_nvm { #define IXGBE_ACI_NVM_PERST_FLAG 1 #define IXGBE_ACI_NVM_EMPR_FLAG 2 #define IXGBE_ACI_NVM_EMPR_ENA BIT(0) /* Write Activate reply only */ +#define IXGBE_ACI_NVM_NO_PRESERVATION 0x0 +#define IXGBE_ACI_NVM_PRESERVE_SELECTED 0x6 + /* For Write Activate, several flags are sent as part of a separate * flags2 field using a separate byte. For simplicity of the software * interface, we pass the flags as a 16 bit value so these flags are @@ -805,6 +903,63 @@ struct ixgbe_aci_cmd_nvm_checksum { u8 rsvd2[12]; }; +/* Used for NVM Set Package Data command - 0x070A */ +struct ixgbe_aci_cmd_nvm_pkg_data { + u8 reserved[3]; + u8 cmd_flags; +#define IXGBE_ACI_NVM_PKG_DELETE BIT(0) /* used for command call */ + + u32 reserved1; + __le32 addr_high; + __le32 addr_low; +}; + +/* Used for Pass Component Table command - 0x070B */ +struct ixgbe_aci_cmd_nvm_pass_comp_tbl { + u8 component_response; /* Response only */ +#define IXGBE_ACI_NVM_PASS_COMP_CAN_BE_UPDATED 0x0 +#define IXGBE_ACI_NVM_PASS_COMP_CAN_MAY_BE_UPDATEABLE 0x1 +#define IXGBE_ACI_NVM_PASS_COMP_CAN_NOT_BE_UPDATED 0x2 +#define IXGBE_ACI_NVM_PASS_COMP_PARTIAL_CHECK 0x3 + u8 component_response_code; /* Response only */ +#define IXGBE_ACI_NVM_PASS_COMP_CAN_BE_UPDATED_CODE 0x0 +#define IXGBE_ACI_NVM_PASS_COMP_STAMP_IDENTICAL_CODE 0x1 +#define IXGBE_ACI_NVM_PASS_COMP_STAMP_LOWER 0x2 +#define IXGBE_ACI_NVM_PASS_COMP_INVALID_STAMP_CODE 0x3 +#define IXGBE_ACI_NVM_PASS_COMP_CONFLICT_CODE 0x4 +#define IXGBE_ACI_NVM_PASS_COMP_PRE_REQ_NOT_MET_CODE 0x5 +#define IXGBE_ACI_NVM_PASS_COMP_NOT_SUPPORTED_CODE 0x6 +#define IXGBE_ACI_NVM_PASS_COMP_CANNOT_DOWNGRADE_CODE 0x7 +#define IXGBE_ACI_NVM_PASS_COMP_INCOMPLETE_IMAGE_CODE 0x8 +#define IXGBE_ACI_NVM_PASS_COMP_VER_STR_IDENTICAL_CODE 0xA +#define IXGBE_ACI_NVM_PASS_COMP_VER_STR_LOWER_CODE 0xB + u8 reserved; + u8 transfer_flag; + __le32 reserved1; + __le32 addr_high; + __le32 addr_low; +}; + +struct ixgbe_aci_cmd_nvm_comp_tbl { + __le16 comp_class; +#define NVM_COMP_CLASS_ALL_FW 0x000A + + __le16 comp_id; +#define NVM_COMP_ID_OROM 0x5 +#define NVM_COMP_ID_NVM 0x6 +#define NVM_COMP_ID_NETLIST 0x8 + + u8 comp_class_idx; +#define FWU_COMP_CLASS_IDX_NOT_USE 0x0 + + __le32 comp_cmp_stamp; + u8 cvs_type; +#define NVM_CVS_TYPE_ASCII 0x1 + + u8 cvs_len; + u8 cvs[]; /* Component Version String */ +} __packed; + /** * struct ixgbe_aci_desc - Admin Command (AC) descriptor * @flags: IXGBE_ACI_FLAG_* flags @@ -843,11 +998,14 @@ struct ixgbe_aci_desc { struct ixgbe_aci_cmd_restart_an restart_an; struct ixgbe_aci_cmd_get_link_status get_link_status; struct ixgbe_aci_cmd_set_event_mask set_event_mask; + struct ixgbe_aci_cmd_set_port_id_led set_port_id_led; struct ixgbe_aci_cmd_get_link_topo get_link_topo; struct ixgbe_aci_cmd_get_link_topo_pin get_link_topo_pin; struct 
ixgbe_aci_cmd_sff_eeprom read_write_sff_param; struct ixgbe_aci_cmd_nvm nvm; struct ixgbe_aci_cmd_nvm_checksum nvm_checksum; + struct ixgbe_aci_cmd_nvm_pkg_data pkg_data; + struct ixgbe_aci_cmd_nvm_pass_comp_tbl pass_comp_tbl; } params; }; @@ -984,6 +1142,16 @@ struct ixgbe_hw_caps { #define IXGBE_EXT_TOPO_DEV_IMG_PROG_EN BIT(1) } __packed; +#define IXGBE_OROM_CIV_SIGNATURE "$CIV" + +struct ixgbe_orom_civd_info { + u8 signature[4]; /* Must match ASCII '$CIV' characters */ + u8 checksum; /* Simple modulo 256 sum of all structure bytes must equal 0 */ + __le32 combo_ver; /* Combo Image Version number */ + u8 combo_name_len; /* Length of the unicode combo image version string, max of 32 */ + __le16 combo_name[32]; /* Unicode string representing the Combo Image version */ +}; + /* Function specific capabilities */ struct ixgbe_hw_func_caps { u32 num_allocd_vfs; /* Number of allocated VFs */ @@ -1015,6 +1183,11 @@ struct ixgbe_aci_info { enum ixgbe_aci_err last_status; /* last status of sent admin command */ }; +enum ixgbe_bank_select { + IXGBE_ACTIVE_FLASH_BANK, + IXGBE_INACTIVE_FLASH_BANK, +}; + /* Option ROM version information */ struct ixgbe_orom_info { u8 major; /* Major version of OROM */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c index 1fc821fb351a..f1ab95aa8c83 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c @@ -894,6 +894,7 @@ static const struct ixgbe_eeprom_operations eeprom_ops_X540 = { .calc_checksum = &ixgbe_calc_eeprom_checksum_X540, .validate_checksum = &ixgbe_validate_eeprom_checksum_X540, .update_checksum = &ixgbe_update_eeprom_checksum_X540, + .read_pba_string = &ixgbe_read_pba_string_generic, }; static const struct ixgbe_phy_operations phy_ops_X540 = { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 277ceaf8a793..1d2acdb64f45 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -3959,6 +3959,7 @@ static const struct ixgbe_mac_operations mac_ops_x550em_a_fw = { .validate_checksum = &ixgbe_validate_eeprom_checksum_X550, \ .update_checksum = &ixgbe_update_eeprom_checksum_X550, \ .calc_checksum = &ixgbe_calc_eeprom_checksum_X550, \ + .read_pba_string = &ixgbe_read_pba_string_generic, \ static const struct ixgbe_eeprom_operations eeprom_ops_X550 = { X550_COMMON_EEP diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c index 3e3b471e53f0..ac58964b2f08 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c @@ -508,7 +508,7 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector, int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags) { - struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev); struct ixgbe_ring *ring; if (test_bit(__IXGBE_DOWN, &adapter->state)) diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.c b/drivers/net/ethernet/intel/ixgbevf/ipsec.c index 8ba037e3d9c2..65580b9cb06f 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ipsec.c +++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.c @@ -201,6 +201,7 @@ struct xfrm_state *ixgbevf_ipsec_find_rx_state(struct ixgbevf_ipsec *ipsec, /** * ixgbevf_ipsec_parse_proto_keys - find the key and salt based on the protocol + * @dev: pointer to net device to program * @xs: pointer to xfrm_state struct * @mykey: 
pointer to key array to populate * @mysalt: pointer to salt value to populate @@ -208,10 +209,10 @@ struct xfrm_state *ixgbevf_ipsec_find_rx_state(struct ixgbevf_ipsec *ipsec, * This copies the protocol keys and salt to our own data tables. The * 82599 family only supports the one algorithm. **/ -static int ixgbevf_ipsec_parse_proto_keys(struct xfrm_state *xs, +static int ixgbevf_ipsec_parse_proto_keys(struct net_device *dev, + struct xfrm_state *xs, u32 *mykey, u32 *mysalt) { - struct net_device *dev = xs->xso.real_dev; unsigned char *key_data; char *alg_name = NULL; int key_len; @@ -256,13 +257,14 @@ static int ixgbevf_ipsec_parse_proto_keys(struct xfrm_state *xs, /** * ixgbevf_ipsec_add_sa - program device with a security association + * @dev: pointer to net device to program * @xs: pointer to transformer state struct * @extack: extack point to fill failure reason **/ -static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs, +static int ixgbevf_ipsec_add_sa(struct net_device *dev, + struct xfrm_state *xs, struct netlink_ext_ack *extack) { - struct net_device *dev = xs->xso.real_dev; struct ixgbevf_adapter *adapter; struct ixgbevf_ipsec *ipsec; u16 sa_idx; @@ -310,7 +312,8 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs, rsa.decrypt = xs->ealg || xs->aead; /* get the key and salt */ - ret = ixgbevf_ipsec_parse_proto_keys(xs, rsa.key, &rsa.salt); + ret = ixgbevf_ipsec_parse_proto_keys(dev, xs, rsa.key, + &rsa.salt); if (ret) { NL_SET_ERR_MSG_MOD(extack, "Failed to get key data for Rx SA table"); return ret; @@ -363,7 +366,8 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs, if (xs->id.proto & IPPROTO_ESP) tsa.encrypt = xs->ealg || xs->aead; - ret = ixgbevf_ipsec_parse_proto_keys(xs, tsa.key, &tsa.salt); + ret = ixgbevf_ipsec_parse_proto_keys(dev, xs, tsa.key, + &tsa.salt); if (ret) { NL_SET_ERR_MSG_MOD(extack, "Failed to get key data for Tx SA table"); memset(&tsa, 0, sizeof(tsa)); @@ -388,11 +392,12 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs, /** * ixgbevf_ipsec_del_sa - clear out this specific SA + * @dev: pointer to net device to program * @xs: pointer to transformer state struct **/ -static void ixgbevf_ipsec_del_sa(struct xfrm_state *xs) +static void ixgbevf_ipsec_del_sa(struct net_device *dev, + struct xfrm_state *xs) { - struct net_device *dev = xs->xso.real_dev; struct ixgbevf_adapter *adapter; struct ixgbevf_ipsec *ipsec; u16 sa_idx; diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig index 837295fecd17..50f7c59e8a04 100644 --- a/drivers/net/ethernet/marvell/Kconfig +++ b/drivers/net/ethernet/marvell/Kconfig @@ -34,7 +34,6 @@ config MV643XX_ETH config MVMDIO tristate "Marvell MDIO interface support" depends on HAS_IOMEM - select MDIO_DEVRES select PHYLIB help This driver supports the MDIO interface found in the network diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 416a926a8281..a7872d14a49d 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -5173,38 +5173,40 @@ mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->tx_dropped = dev->stats.tx_dropped; } -static int mvpp2_set_ts_config(struct mvpp2_port *port, struct ifreq *ifr) +static int mvpp2_hwtstamp_set(struct net_device *dev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) { - struct hwtstamp_config config; + struct mvpp2_port *port = netdev_priv(dev); void __iomem *ptp; 
u32 gcr, int_mask; - if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) - return -EFAULT; + if (!port->hwtstamp) + return -EOPNOTSUPP; - if (config.tx_type != HWTSTAMP_TX_OFF && - config.tx_type != HWTSTAMP_TX_ON) + if (config->tx_type != HWTSTAMP_TX_OFF && + config->tx_type != HWTSTAMP_TX_ON) return -ERANGE; ptp = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id); int_mask = gcr = 0; - if (config.tx_type != HWTSTAMP_TX_OFF) { + if (config->tx_type != HWTSTAMP_TX_OFF) { gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_TX_RESET; int_mask |= MVPP22_PTP_INT_MASK_QUEUE1 | MVPP22_PTP_INT_MASK_QUEUE0; } /* It seems we must also release the TX reset when enabling the TSU */ - if (config.rx_filter != HWTSTAMP_FILTER_NONE) + if (config->rx_filter != HWTSTAMP_FILTER_NONE) gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_RX_RESET | MVPP22_PTP_GCR_TX_RESET; if (gcr & MVPP22_PTP_GCR_TSU_ENABLE) mvpp22_tai_start(port->priv->tai); - if (config.rx_filter != HWTSTAMP_FILTER_NONE) { - config.rx_filter = HWTSTAMP_FILTER_ALL; + if (config->rx_filter != HWTSTAMP_FILTER_NONE) { + config->rx_filter = HWTSTAMP_FILTER_ALL; mvpp2_modify(ptp + MVPP22_PTP_GCR, MVPP22_PTP_GCR_RX_RESET | MVPP22_PTP_GCR_TX_RESET | @@ -5225,26 +5227,22 @@ static int mvpp2_set_ts_config(struct mvpp2_port *port, struct ifreq *ifr) if (!(gcr & MVPP22_PTP_GCR_TSU_ENABLE)) mvpp22_tai_stop(port->priv->tai); - port->tx_hwtstamp_type = config.tx_type; - - if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) - return -EFAULT; + port->tx_hwtstamp_type = config->tx_type; return 0; } -static int mvpp2_get_ts_config(struct mvpp2_port *port, struct ifreq *ifr) +static int mvpp2_hwtstamp_get(struct net_device *dev, + struct kernel_hwtstamp_config *config) { - struct hwtstamp_config config; - - memset(&config, 0, sizeof(config)); + struct mvpp2_port *port = netdev_priv(dev); - config.tx_type = port->tx_hwtstamp_type; - config.rx_filter = port->rx_hwtstamp ? - HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE; + if (!port->hwtstamp) + return -EOPNOTSUPP; - if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) - return -EFAULT; + config->tx_type = port->tx_hwtstamp_type; + config->rx_filter = port->rx_hwtstamp ? 
HWTSTAMP_FILTER_ALL : + HWTSTAMP_FILTER_NONE; return 0; } @@ -5274,18 +5272,6 @@ static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct mvpp2_port *port = netdev_priv(dev); - switch (cmd) { - case SIOCSHWTSTAMP: - if (port->hwtstamp) - return mvpp2_set_ts_config(port, ifr); - break; - - case SIOCGHWTSTAMP: - if (port->hwtstamp) - return mvpp2_get_ts_config(port, ifr); - break; - } - if (!port->phylink) return -ENOTSUPP; @@ -5799,6 +5785,8 @@ static const struct net_device_ops mvpp2_netdev_ops = { .ndo_set_features = mvpp2_set_features, .ndo_bpf = mvpp2_xdp, .ndo_xdp_xmit = mvpp2_xdp_xmit, + .ndo_hwtstamp_get = mvpp2_hwtstamp_get, + .ndo_hwtstamp_set = mvpp2_hwtstamp_set, }; static const struct ethtool_ops mvpp2_eth_tool_ops = { diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c index ccb69bc5c952..420c3f4cf741 100644 --- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c +++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c @@ -18,8 +18,6 @@ #include "octep_vf_config.h" #include "octep_vf_main.h" -struct workqueue_struct *octep_vf_wq; - /* Supported Devices */ static const struct pci_device_id octep_vf_pci_id_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN93_VF)}, diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h index 1a352f41f823..b9f13506f462 100644 --- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h +++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h @@ -320,8 +320,6 @@ static inline u16 OCTEP_VF_MINOR_REV(struct octep_vf_device *oct) #define octep_vf_read_csr64(octep_vf_dev, reg_off) \ readq((octep_vf_dev)->mmio.hw_addr + (reg_off)) -extern struct workqueue_struct *octep_vf_wq; - int octep_vf_device_setup(struct octep_vf_device *oct); int octep_vf_setup_iqs(struct octep_vf_device *oct); void octep_vf_free_iqs(struct octep_vf_device *oct); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c index 1e5aa5397504..7d21905deed8 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c @@ -188,14 +188,13 @@ int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid) { unsigned long timeout = jiffies + msecs_to_jiffies(MBOX_RSP_TIMEOUT); struct otx2_mbox_dev *mdev = &mbox->dev[devid]; - struct device *sender = &mbox->pdev->dev; while (!time_after(jiffies, timeout)) { if (mdev->num_msgs == mdev->msgs_acked) return 0; usleep_range(800, 1000); } - dev_dbg(sender, "timed out while waiting for rsp\n"); + trace_otx2_msg_wait_rsp(mbox->pdev); return -EIO; } EXPORT_SYMBOL(otx2_mbox_wait_for_rsp); @@ -219,6 +218,7 @@ static void otx2_mbox_msg_send_data(struct otx2_mbox *mbox, int devid, u64 data) struct otx2_mbox_dev *mdev = &mbox->dev[devid]; struct mbox_hdr *tx_hdr, *rx_hdr; void *hw_mbase = mdev->hwbase; + struct mbox_msghdr *msg; u64 intr_val; tx_hdr = hw_mbase + mbox->tx_start; @@ -251,7 +251,10 @@ static void otx2_mbox_msg_send_data(struct otx2_mbox *mbox, int devid, u64 data) tx_hdr->num_msgs = mdev->num_msgs; rx_hdr->num_msgs = 0; - trace_otx2_msg_send(mbox->pdev, tx_hdr->num_msgs, tx_hdr->msg_size); + msg = (struct mbox_msghdr *)(hw_mbase + mbox->tx_start + msgs_offset); + + trace_otx2_msg_send(mbox->pdev, tx_hdr->num_msgs, tx_hdr->msg_size, + msg->id, msg->pcifunc); spin_unlock(&mdev->mbox_lock); @@ -445,6 +448,14 @@ const 
char *otx2_mbox_id2name(u16 id) #define M(_name, _id, _1, _2, _3) case _id: return # _name; MBOX_MESSAGES #undef M + +#define M(_name, _id, _1, _2, _3) case _id: return # _name; + MBOX_UP_CGX_MESSAGES +#undef M + +#define M(_name, _id, _1, _2, _3) case _id: return # _name; + MBOX_UP_CPT_MESSAGES +#undef M default: return "INVALID ID"; } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h index 005ca8a056c0..a213b2663583 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h @@ -524,6 +524,8 @@ struct get_hw_cap_rsp { u8 nix_fixed_txschq_mapping; /* Schq mapping fixed or flexible */ u8 nix_shaping; /* Is shaping and coloring supported */ u8 npc_hash_extract; /* Is hash extract supported */ +#define HW_CAP_MACSEC BIT_ULL(1) + u64 hw_caps; }; /* CGX mbox message formats */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c index 655dd4726d36..0277d226293e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c @@ -143,6 +143,8 @@ static int mcs_notify_pfvf(struct mcs_intr_event *event, struct rvu *rvu) otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pf); + otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pf); + mutex_unlock(&rvu->mbox_lock); return 0; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c index 6575c422635b..a8025f0486c9 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c @@ -1393,8 +1393,6 @@ static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype) if (blkaddr < 0) return; - if (blktype == BLKTYPE_NIX) - rvu_nix_reset_mac(pfvf, pcifunc); block = &hw->block[blkaddr]; @@ -1407,6 +1405,10 @@ static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype) if (lf < 0) /* This should never happen */ continue; + if (blktype == BLKTYPE_NIX) { + rvu_nix_reset_mac(pfvf, pcifunc); + rvu_npc_clear_ucast_entry(rvu, pcifunc, lf); + } /* Disable the LF */ rvu_write64(rvu, blkaddr, block->lfcfg_reg | (lf << block->lfshift), 0x00ULL); @@ -2031,6 +2033,9 @@ int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req, rsp->nix_shaping = hw->cap.nix_shaping; rsp->npc_hash_extract = hw->cap.npc_hash_extract; + if (rvu->mcs_blk_cnt) + rsp->hw_caps = HW_CAP_MACSEC; + return 0; } @@ -2173,7 +2178,7 @@ static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid, if (rsp && err) \ rsp->hdr.rc = err; \ \ - trace_otx2_msg_process(mbox->pdev, _id, err); \ + trace_otx2_msg_process(mbox->pdev, _id, err, req->pcifunc); \ return rsp ? 
err : -ENOMEM; \ } MBOX_MESSAGES diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h index 60f085b00a8c..48f66292ad5c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -969,8 +969,6 @@ void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf, bool enable); void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc, int nixlf, u64 chan); -void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, int nixlf, - bool enable); void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf, u64 chan); void rvu_npc_enable_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf, @@ -996,6 +994,8 @@ void rvu_npc_get_mcam_entry_alloc_info(struct rvu *rvu, u16 pcifunc, void rvu_npc_get_mcam_counter_alloc_info(struct rvu *rvu, u16 pcifunc, int blkaddr, int *alloc_cnt, int *enable_cnt); +void rvu_npc_clear_ucast_entry(struct rvu *rvu, int pcifunc, int nixlf); + bool is_npc_intf_tx(u8 intf); bool is_npc_intf_rx(u8 intf); bool is_npc_interface_valid(struct rvu *rvu, u8 intf); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c index 992fa0b82e8d..d0331b0e0bfd 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c @@ -34,7 +34,7 @@ static struct _req_type __maybe_unused \ return NULL; \ req->hdr.sig = OTX2_MBOX_REQ_SIG; \ req->hdr.id = _id; \ - trace_otx2_msg_alloc(rvu->pdev, _id, sizeof(*req)); \ + trace_otx2_msg_alloc(rvu->pdev, _id, sizeof(*req), 0); \ return req; \ } @@ -272,6 +272,8 @@ static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu) otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pfid); + otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pfid); + mutex_unlock(&rvu->mbox_lock); } while (pfmap); } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c index 821fe242f821..da15bb451178 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c @@ -820,24 +820,6 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc, rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp); } -void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, int nixlf, - bool enable) -{ - struct npc_mcam *mcam = &rvu->hw->mcam; - int blkaddr, index; - - blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); - if (blkaddr < 0) - return; - - /* Get 'pcifunc' of PF device */ - pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK; - - index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf, - NIXLF_BCAST_ENTRY); - npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable); -} - void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf, u64 chan) { @@ -1125,6 +1107,7 @@ void npc_enadis_default_mce_entry(struct rvu *rvu, u16 pcifunc, static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf, bool enable) { + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); struct npc_mcam *mcam = &rvu->hw->mcam; int index, blkaddr; @@ -1133,9 +1116,12 @@ static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc, return; /* Ucast MCAM match entry of this PF/VF */ - index = npc_get_nixlf_mcam_index(mcam, pcifunc, - nixlf, NIXLF_UCAST_ENTRY); - npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable); + if (npc_is_feature_supported(rvu, BIT_ULL(NPC_DMAC), + 
pfvf->nix_rx_intf)) { + index = npc_get_nixlf_mcam_index(mcam, pcifunc, + nixlf, NIXLF_UCAST_ENTRY); + npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable); + } /* Nothing to do for VFs, on platforms where pkt replication * is not supported @@ -3588,3 +3574,33 @@ int rvu_mbox_handler_npc_mcam_entry_stats(struct rvu *rvu, return 0; } + +void rvu_npc_clear_ucast_entry(struct rvu *rvu, int pcifunc, int nixlf) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + struct rvu_npc_mcam_rule *rule; + int ucast_idx, blkaddr; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return; + + ucast_idx = npc_get_nixlf_mcam_index(mcam, pcifunc, + nixlf, NIXLF_UCAST_ENTRY); + + npc_enable_mcam_entry(rvu, mcam, blkaddr, ucast_idx, false); + + npc_set_mcam_action(rvu, mcam, blkaddr, ucast_idx, 0); + + npc_clear_mcam_entry(rvu, mcam, blkaddr, ucast_idx); + + mutex_lock(&mcam->lock); + list_for_each_entry(rule, &mcam->mcam_rules, list) { + if (rule->entry == ucast_idx) { + list_del(&rule->list); + kfree(rule); + break; + } + } + mutex_unlock(&mcam->lock); +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c index 052ae5923e3a..32953cca108c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c @@ -60,6 +60,8 @@ static int rvu_rep_up_notify(struct rvu *rvu, struct rep_event *event) otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pf); + otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pf); + mutex_unlock(&rvu->mbox_lock); return 0; } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c index 775fd4c35794..19e0d16b12f6 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c @@ -11,3 +11,5 @@ EXPORT_TRACEPOINT_SYMBOL(otx2_msg_alloc); EXPORT_TRACEPOINT_SYMBOL(otx2_msg_interrupt); EXPORT_TRACEPOINT_SYMBOL(otx2_msg_process); +EXPORT_TRACEPOINT_SYMBOL(otx2_msg_status); +EXPORT_TRACEPOINT_SYMBOL(otx2_parse_dump); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h index 5704520f9b02..4cd0fc4b0d20 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h @@ -18,33 +18,42 @@ #include "mbox.h" TRACE_EVENT(otx2_msg_alloc, - TP_PROTO(const struct pci_dev *pdev, u16 id, u64 size), - TP_ARGS(pdev, id, size), + TP_PROTO(const struct pci_dev *pdev, u16 id, u64 size, u16 pcifunc), + TP_ARGS(pdev, id, size, pcifunc), TP_STRUCT__entry(__string(dev, pci_name(pdev)) __field(u16, id) __field(u64, size) + __field(u16, pcifunc) ), TP_fast_assign(__assign_str(dev); __entry->id = id; __entry->size = size; + __entry->pcifunc = pcifunc; ), - TP_printk("[%s] msg:(%s) size:%lld\n", __get_str(dev), - otx2_mbox_id2name(__entry->id), __entry->size) + TP_printk("[%s] msg:(%s) size:%lld pcifunc:0x%x\n", __get_str(dev), + otx2_mbox_id2name(__entry->id), __entry->size, + __entry->pcifunc) ); TRACE_EVENT(otx2_msg_send, - TP_PROTO(const struct pci_dev *pdev, u16 num_msgs, u64 msg_size), - TP_ARGS(pdev, num_msgs, msg_size), + TP_PROTO(const struct pci_dev *pdev, u16 num_msgs, u64 msg_size, + u16 id, u16 pcifunc), + TP_ARGS(pdev, num_msgs, msg_size, id, pcifunc), TP_STRUCT__entry(__string(dev, pci_name(pdev)) __field(u16, num_msgs) __field(u64, msg_size) + __field(u16, id) + __field(u16, pcifunc) ), 
TP_fast_assign(__assign_str(dev); __entry->num_msgs = num_msgs; __entry->msg_size = msg_size; + __entry->id = id; + __entry->pcifunc = pcifunc; ), - TP_printk("[%s] sent %d msg(s) of size:%lld\n", __get_str(dev), - __entry->num_msgs, __entry->msg_size) + TP_printk("[%s] sent %d msg(s) of size:%lld msg:(%s) pcifunc:0x%x\n", + __get_str(dev), __entry->num_msgs, __entry->msg_size, + otx2_mbox_id2name(__entry->id), __entry->pcifunc) ); TRACE_EVENT(otx2_msg_check, @@ -81,18 +90,73 @@ TRACE_EVENT(otx2_msg_interrupt, ); TRACE_EVENT(otx2_msg_process, - TP_PROTO(const struct pci_dev *pdev, u16 id, int err), - TP_ARGS(pdev, id, err), + TP_PROTO(const struct pci_dev *pdev, u16 id, int err, u16 pcifunc), + TP_ARGS(pdev, id, err, pcifunc), TP_STRUCT__entry(__string(dev, pci_name(pdev)) __field(u16, id) __field(int, err) + __field(u16, pcifunc) ), TP_fast_assign(__assign_str(dev); __entry->id = id; __entry->err = err; + __entry->pcifunc = pcifunc; + ), + TP_printk("[%s] msg:(%s) error:%d pcifunc:0x%x\n", __get_str(dev), + otx2_mbox_id2name(__entry->id), + __entry->err, __entry->pcifunc) +); + +TRACE_EVENT(otx2_msg_wait_rsp, + TP_PROTO(const struct pci_dev *pdev), + TP_ARGS(pdev), + TP_STRUCT__entry(__string(dev, pci_name(pdev)) + ), + TP_fast_assign(__assign_str(dev) + ), + TP_printk("[%s] timed out while waiting for response\n", + __get_str(dev)) +); + +TRACE_EVENT(otx2_msg_status, + TP_PROTO(const struct pci_dev *pdev, const char *msg, u16 num_msgs), + TP_ARGS(pdev, msg, num_msgs), + TP_STRUCT__entry(__string(dev, pci_name(pdev)) + __string(str, msg) + __field(u16, num_msgs) + ), + TP_fast_assign(__assign_str(dev); + __assign_str(str); + __entry->num_msgs = num_msgs; + ), + TP_printk("[%s] %s num_msgs:%d\n", __get_str(dev), + __get_str(str), __entry->num_msgs) +); + +TRACE_EVENT(otx2_parse_dump, + TP_PROTO(const struct pci_dev *pdev, char *msg, u64 *word), + TP_ARGS(pdev, msg, word), + TP_STRUCT__entry(__string(dev, pci_name(pdev)) + __string(str, msg) + __field(u64, w0) + __field(u64, w1) + __field(u64, w2) + __field(u64, w3) + __field(u64, w4) + __field(u64, w5) + ), + TP_fast_assign(__assign_str(dev); + __assign_str(str); + __entry->w0 = *(word + 0); + __entry->w1 = *(word + 1); + __entry->w2 = *(word + 2); + __entry->w3 = *(word + 3); + __entry->w4 = *(word + 4); + __entry->w5 = *(word + 5); ), - TP_printk("[%s] msg:(%s) error:%d\n", __get_str(dev), - otx2_mbox_id2name(__entry->id), __entry->err) + TP_printk("[%s] nix parse %s W0:%#llx W1:%#llx W2:%#llx W3:%#llx W4:%#llx W5:%#llx\n", + __get_str(dev), __get_str(str), __entry->w0, __entry->w1, __entry->w2, + __entry->w3, __entry->w4, __entry->w5) ); #endif /* __RVU_TRACE_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c index c3b6e0f60a79..7f6a435ac680 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c @@ -357,9 +357,12 @@ int cn10k_free_matchall_ipolicer(struct otx2_nic *pfvf) mutex_lock(&pfvf->mbox.lock); /* Remove RQ's policer mapping */ - for (qidx = 0; qidx < hw->rx_queues; qidx++) - cn10k_map_unmap_rq_policer(pfvf, qidx, - hw->matchall_ipolicer, false); + for (qidx = 0; qidx < hw->rx_queues; qidx++) { + rc = cn10k_map_unmap_rq_policer(pfvf, qidx, hw->matchall_ipolicer, false); + if (rc) + dev_warn(pfvf->dev, "Failed to unmap RQ %d's policer (error %d).", + qidx, rc); + } rc = cn10k_free_leaf_profile(pfvf, hw->matchall_ipolicer); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c 
b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c index fc59e50bafce..a6500e3673f2 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c @@ -663,10 +663,10 @@ static int cn10k_ipsec_inb_add_state(struct xfrm_state *x, return -EOPNOTSUPP; } -static int cn10k_ipsec_outb_add_state(struct xfrm_state *x, +static int cn10k_ipsec_outb_add_state(struct net_device *dev, + struct xfrm_state *x, struct netlink_ext_ack *extack) { - struct net_device *netdev = x->xso.dev; struct cn10k_tx_sa_s *sa_entry; struct qmem *sa_info; struct otx2_nic *pf; @@ -676,7 +676,7 @@ static int cn10k_ipsec_outb_add_state(struct xfrm_state *x, if (err) return err; - pf = netdev_priv(netdev); + pf = netdev_priv(dev); err = qmem_alloc(pf->dev, &sa_info, pf->ipsec.sa_size, OTX2_ALIGN); if (err) @@ -700,18 +700,18 @@ static int cn10k_ipsec_outb_add_state(struct xfrm_state *x, return 0; } -static int cn10k_ipsec_add_state(struct xfrm_state *x, +static int cn10k_ipsec_add_state(struct net_device *dev, + struct xfrm_state *x, struct netlink_ext_ack *extack) { if (x->xso.dir == XFRM_DEV_OFFLOAD_IN) return cn10k_ipsec_inb_add_state(x, extack); else - return cn10k_ipsec_outb_add_state(x, extack); + return cn10k_ipsec_outb_add_state(dev, x, extack); } -static void cn10k_ipsec_del_state(struct xfrm_state *x) +static void cn10k_ipsec_del_state(struct net_device *dev, struct xfrm_state *x) { - struct net_device *netdev = x->xso.dev; struct cn10k_tx_sa_s *sa_entry; struct qmem *sa_info; struct otx2_nic *pf; @@ -720,7 +720,7 @@ static void cn10k_ipsec_del_state(struct xfrm_state *x) if (x->xso.dir == XFRM_DEV_OFFLOAD_IN) return; - pf = netdev_priv(netdev); + pf = netdev_priv(dev); sa_info = (struct qmem *)x->xso.offload_handle; sa_entry = (struct cn10k_tx_sa_s *)sa_info->base; @@ -732,7 +732,7 @@ static void cn10k_ipsec_del_state(struct xfrm_state *x) err = cn10k_outb_write_sa(pf, sa_info); if (err) - netdev_err(netdev, "Error (%d) deleting SA\n", err); + netdev_err(dev, "Error (%d) deleting SA\n", err); x->xso.offload_handle = 0; qmem_free(pf->dev, sa_info); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c index 84cd029a85aa..6f572589f1e5 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c @@ -2055,6 +2055,43 @@ int otx2_handle_ntuple_tc_features(struct net_device *netdev, netdev_features_t } EXPORT_SYMBOL(otx2_handle_ntuple_tc_features); +int otx2_set_hw_capabilities(struct otx2_nic *pfvf) +{ + struct mbox *mbox = &pfvf->mbox; + struct otx2_hw *hw = &pfvf->hw; + struct get_hw_cap_rsp *rsp; + struct msg_req *req; + int ret = -ENOMEM; + + mutex_lock(&mbox->lock); + + req = otx2_mbox_alloc_msg_get_hw_cap(mbox); + if (!req) + goto fail; + + ret = otx2_sync_mbox_msg(mbox); + if (ret) + goto fail; + + rsp = (struct get_hw_cap_rsp *)otx2_mbox_get_rsp(&pfvf->mbox.mbox, + 0, &req->hdr); + if (IS_ERR(rsp)) { + ret = -EINVAL; + goto fail; + } + + if (rsp->hw_caps & HW_CAP_MACSEC) + __set_bit(CN10K_HW_MACSEC, &hw->cap_flag); + + mutex_unlock(&mbox->lock); + + return 0; +fail: + dev_err(pfvf->dev, "Cannot get MACSEC capability from AF\n"); + mutex_unlock(&mbox->lock); + return ret; +} + #define M(_name, _id, _fn_name, _req_type, _rsp_type) \ int __weak \ otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf, \ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h 
b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h index d6b4b74e4002..ca0e6ab12ceb 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h @@ -632,9 +632,6 @@ static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf) __set_bit(CN10K_PTP_ONESTEP, &hw->cap_flag); __set_bit(QOS_CIR_PIR_SUPPORT, &hw->cap_flag); } - - if (is_dev_cn10kb(pfvf->pdev)) - __set_bit(CN10K_HW_MACSEC, &hw->cap_flag); } /* Register read/write APIs */ @@ -872,6 +869,7 @@ static struct _req_type __maybe_unused \ *otx2_mbox_alloc_msg_ ## _fn_name(struct mbox *mbox) \ { \ struct _req_type *req; \ + u16 pcifunc = mbox->pfvf->pcifunc; \ \ req = (struct _req_type *)otx2_mbox_alloc_msg_rsp( \ &mbox->mbox, 0, sizeof(struct _req_type), \ @@ -880,7 +878,8 @@ static struct _req_type __maybe_unused \ return NULL; \ req->hdr.sig = OTX2_MBOX_REQ_SIG; \ req->hdr.id = _id; \ - trace_otx2_msg_alloc(mbox->mbox.pdev, _id, sizeof(*req)); \ + req->hdr.pcifunc = pcifunc; \ + trace_otx2_msg_alloc(mbox->mbox.pdev, _id, sizeof(*req), pcifunc); \ return req; \ } @@ -1044,6 +1043,7 @@ void otx2_disable_napi(struct otx2_nic *pf); irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq); int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura); int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx); +int otx2_set_hw_capabilities(struct otx2_nic *pfvf); /* RSS configuration APIs*/ int otx2_rss_init(struct otx2_nic *pfvf); @@ -1108,6 +1108,8 @@ int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable); int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf); bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, struct xdp_frame *xdpf, u64 iova, int len, u16 qidx, u16 flags); +void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, struct xdp_frame *xdpf, + u64 dma_addr, int len, int *offset, u16 flags); u16 otx2_get_max_mtu(struct otx2_nic *pfvf); int otx2_handle_ntuple_tc_features(struct net_device *netdev, netdev_features_t features); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index cfed9ec5b157..db7c466fdc39 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -465,6 +465,9 @@ static void otx2_pfvf_mbox_handler(struct work_struct *work) offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN); + trace_otx2_msg_status(pf->pdev, "PF-VF down queue handler(forwarding)", + vf_mbox->num_msgs); + for (id = 0; id < vf_mbox->num_msgs; id++) { msg = (struct mbox_msghdr *)(mdev->mbase + mbox->rx_start + offset); @@ -473,7 +476,7 @@ static void otx2_pfvf_mbox_handler(struct work_struct *work) goto inval_msg; /* Set VF's number in each of the msg */ - msg->pcifunc &= RVU_PFVF_FUNC_MASK; + msg->pcifunc &= ~RVU_PFVF_FUNC_MASK; msg->pcifunc |= (vf_idx + 1) & RVU_PFVF_FUNC_MASK; offset = msg->next_msgoff; } @@ -503,6 +506,9 @@ static void otx2_pfvf_mbox_up_handler(struct work_struct *work) offset = mbox->rx_start + ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN); + trace_otx2_msg_status(pf->pdev, "PF-VF up queue handler(response)", + vf_mbox->up_num_msgs); + for (id = 0; id < vf_mbox->up_num_msgs; id++) { msg = mdev->mbase + offset; @@ -819,6 +825,9 @@ static void otx2_pfaf_mbox_handler(struct work_struct *work) offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN); pf = af_mbox->pfvf; + trace_otx2_msg_status(pf->pdev, "PF-AF down queue handler(response)", + num_msgs); + for (id = 0; id < num_msgs; id++) { msg = 
(struct mbox_msghdr *)(mdev->mbase + offset); otx2_process_pfaf_mbox_msg(pf, msg); @@ -974,6 +983,9 @@ static void otx2_pfaf_mbox_up_handler(struct work_struct *work) offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN); + trace_otx2_msg_status(pf->pdev, "PF-AF up queue handler(notification)", + num_msgs); + for (id = 0; id < num_msgs; id++) { msg = (struct mbox_msghdr *)(mdev->mbase + offset); @@ -1023,6 +1035,9 @@ static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq) trace_otx2_msg_interrupt(pf->pdev, "UP message from AF to PF", BIT_ULL(0)); + + trace_otx2_msg_status(pf->pdev, "PF-AF up work queued(interrupt)", + hdr->num_msgs); } if (mbox_data & MBOX_DOWN_MSG) { @@ -1039,6 +1054,9 @@ static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq) trace_otx2_msg_interrupt(pf->pdev, "DOWN reply from AF to PF", BIT_ULL(0)); + + trace_otx2_msg_status(pf->pdev, "PF-AF down work queued(interrupt)", + hdr->num_msgs); } return IRQ_HANDLED; @@ -3048,7 +3066,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) return err; } - err = pci_request_regions(pdev, DRV_NAME); + err = pcim_request_all_regions(pdev, DRV_NAME); if (err) { dev_err(dev, "PCI request regions failed 0x%x\n", err); return err; @@ -3057,7 +3075,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); if (err) { dev_err(dev, "DMA mask config failed, abort\n"); - goto err_release_regions; + return err; } pci_set_master(pdev); @@ -3067,10 +3085,8 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) qos_txqs = min_t(int, qcount, OTX2_QOS_MAX_LEAF_NODES); netdev = alloc_etherdev_mqs(sizeof(*pf), qcount + qos_txqs, qcount); - if (!netdev) { - err = -ENOMEM; - goto err_release_regions; - } + if (!netdev) + return -ENOMEM; pci_set_drvdata(pdev, netdev); SET_NETDEV_DEV(netdev, &pdev->dev); @@ -3128,6 +3144,8 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (err) goto err_ptp_destroy; + otx2_set_hw_capabilities(pf); + err = cn10k_mcs_init(pf); if (err) goto err_del_mcam_entries; @@ -3246,8 +3264,6 @@ err_detach_rsrc: err_free_netdev: pci_set_drvdata(pdev, NULL); free_netdev(netdev); -err_release_regions: - pci_release_regions(pdev); return err; } @@ -3289,6 +3305,7 @@ static void otx2_vf_link_event_task(struct work_struct *work) req = (struct cgx_link_info_msg *)msghdr; req->hdr.id = MBOX_MSG_CGX_LINK_EVENT; req->hdr.sig = OTX2_MBOX_REQ_SIG; + req->hdr.pcifunc = pf->pcifunc; memcpy(&req->link_info, &pf->linfo, sizeof(req->link_info)); otx2_mbox_wait_for_zero(&pf->mbox_pfvf[0].mbox_up, vf_idx); @@ -3447,8 +3464,6 @@ static void otx2_remove(struct pci_dev *pdev) pci_free_irq_vectors(pf->pdev); pci_set_drvdata(pdev, NULL); free_netdev(netdev); - - pci_release_regions(pdev); } static struct pci_driver otx2_pf_driver = { diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c index 0a6bb346ba45..99ace381cc78 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c @@ -335,6 +335,7 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf, struct nix_rx_parse_s *parse = &cqe->parse; struct nix_rx_sg_s *sg = &cqe->sg; struct sk_buff *skb = NULL; + u64 *word = (u64 *)parse; void *end, *start; u32 metasize = 0; u64 *seg_addr; @@ -342,9 +343,12 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf, int seg; 
if (unlikely(parse->errlev || parse->errcode)) { - if (otx2_check_rcv_errors(pfvf, cqe, cq->cq_idx)) + if (otx2_check_rcv_errors(pfvf, cqe, cq->cq_idx)) { + trace_otx2_parse_dump(pfvf->pdev, "Err:", word); return; + } } + trace_otx2_parse_dump(pfvf->pdev, "", word); if (pfvf->xdp_prog) if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq, @@ -1410,9 +1414,8 @@ void otx2_free_pending_sqe(struct otx2_nic *pfvf) } } -static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, - struct xdp_frame *xdpf, - u64 dma_addr, int len, int *offset, u16 flags) +void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, struct xdp_frame *xdpf, + u64 dma_addr, int len, int *offset, u16 flags) { struct nix_sqe_sg_s *sg = NULL; u64 *iova = NULL; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c index 9b28be4c4a5d..8a8b598bd389 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c @@ -136,7 +136,7 @@ static int otx2vf_process_mbox_msg_up(struct otx2_nic *vf, rsp->hdr.id = MBOX_MSG_CGX_LINK_EVENT; rsp->hdr.sig = OTX2_MBOX_RSP_SIG; - rsp->hdr.pcifunc = 0; + rsp->hdr.pcifunc = req->pcifunc; rsp->hdr.rc = 0; err = otx2_mbox_up_handler_cgx_link_event( vf, (struct cgx_link_info_msg *)req, rsp); @@ -548,7 +548,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) return err; } - err = pci_request_regions(pdev, DRV_NAME); + err = pcim_request_all_regions(pdev, DRV_NAME); if (err) { dev_err(dev, "PCI request regions failed 0x%x\n", err); return err; @@ -557,7 +557,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); if (err) { dev_err(dev, "DMA mask config failed, abort\n"); - goto err_release_regions; + return err; } pci_set_master(pdev); @@ -565,10 +565,8 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) qcount = num_online_cpus(); qos_txqs = min_t(int, qcount, OTX2_QOS_MAX_LEAF_NODES); netdev = alloc_etherdev_mqs(sizeof(*vf), qcount + qos_txqs, qcount); - if (!netdev) { - err = -ENOMEM; - goto err_release_regions; - } + if (!netdev) + return -ENOMEM; pci_set_drvdata(pdev, netdev); SET_NETDEV_DEV(netdev, &pdev->dev); @@ -768,8 +766,6 @@ err_free_irq_vectors: err_free_netdev: pci_set_drvdata(pdev, NULL); free_netdev(netdev); -err_release_regions: - pci_release_regions(pdev); return err; } @@ -818,8 +814,6 @@ static void otx2vf_remove(struct pci_dev *pdev) pci_free_irq_vectors(vf->pdev); pci_set_drvdata(pdev, NULL); free_netdev(netdev); - - pci_release_regions(pdev); } static struct pci_driver otx2vf_driver = { diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c index ce10caea8511..b328aae23d73 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c @@ -11,6 +11,7 @@ #include <net/xdp.h> #include "otx2_common.h" +#include "otx2_struct.h" #include "otx2_xsk.h" int otx2_xsk_pool_alloc_buf(struct otx2_nic *pfvf, struct otx2_pool *pool, @@ -196,11 +197,39 @@ void otx2_attach_xsk_buff(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, int sq->xsk_pool = xsk_get_pool_from_qid(pfvf->netdev, qidx); } +static void otx2_xsk_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, + u16 qidx) +{ + struct nix_sqe_hdr_s *sqe_hdr; + struct otx2_snd_queue *sq; + int offset; + + sq = &pfvf->qset.sq[qidx]; + 
memset(sq->sqe_base + 8, 0, sq->sqe_size - 8); + + sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base); + + if (!sqe_hdr->total) { + sqe_hdr->aura = sq->aura_id; + sqe_hdr->df = 1; + sqe_hdr->sq = qidx; + sqe_hdr->pnc = 1; + } + sqe_hdr->total = len; + sqe_hdr->sqe_id = sq->head; + + offset = sizeof(*sqe_hdr); + + otx2_xdp_sqe_add_sg(sq, NULL, iova, len, &offset, OTX2_AF_XDP_FRAME); + sqe_hdr->sizem1 = (offset / 16) - 1; + pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx); +} + void otx2_zc_napi_handler(struct otx2_nic *pfvf, struct xsk_buff_pool *pool, int queue, int budget) { struct xdp_desc *xdp_desc = pool->tx_descs; - int err, i, work_done = 0, batch; + int i, batch; budget = min(budget, otx2_read_free_sqe(pfvf, queue)); batch = xsk_tx_peek_release_desc_batch(pool, budget); @@ -211,15 +240,6 @@ void otx2_zc_napi_handler(struct otx2_nic *pfvf, struct xsk_buff_pool *pool, dma_addr_t dma_addr; dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc[i].addr); - err = otx2_xdp_sq_append_pkt(pfvf, NULL, dma_addr, xdp_desc[i].len, - queue, OTX2_AF_XDP_FRAME); - if (!err) { - netdev_err(pfvf->netdev, "AF_XDP: Unable to transfer packet err%d\n", err); - break; - } - work_done++; + otx2_xsk_sq_append_pkt(pfvf, dma_addr, xdp_desc[i].len, queue); } - - if (work_done) - xsk_tx_release(pool); } diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c index 35acc07bd964..5765bac119f0 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c @@ -1638,6 +1638,7 @@ static int otx2_qos_leaf_del_last(struct otx2_nic *pfvf, u16 classid, bool force if (!node->is_static) dwrr_del_node = true; + WRITE_ONCE(node->qid, OTX2_QOS_QID_INNER); /* destroy the leaf node */ otx2_qos_disable_sq(pfvf, qid); otx2_qos_destroy_node(pfvf, node); @@ -1682,9 +1683,6 @@ static int otx2_qos_leaf_del_last(struct otx2_nic *pfvf, u16 classid, bool force } kfree(new_cfg); - /* update tx_real_queues */ - otx2_qos_update_tx_netdev_queues(pfvf); - return 0; } diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c index c5dbae0e513b..58d572ce08ef 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c @@ -256,6 +256,26 @@ out: return err; } +static int otx2_qos_nix_npa_ndc_sync(struct otx2_nic *pfvf) +{ + struct ndc_sync_op *req; + int rc; + + mutex_lock(&pfvf->mbox.lock); + + req = otx2_mbox_alloc_msg_ndc_sync_op(&pfvf->mbox); + if (!req) { + mutex_unlock(&pfvf->mbox.lock); + return -ENOMEM; + } + + req->nix_lf_tx_sync = true; + req->npa_lf_sync = true; + rc = otx2_sync_mbox_msg(&pfvf->mbox); + mutex_unlock(&pfvf->mbox.lock); + return rc; +} + void otx2_qos_disable_sq(struct otx2_nic *pfvf, int qidx) { struct otx2_qset *qset = &pfvf->qset; @@ -285,6 +305,8 @@ void otx2_qos_disable_sq(struct otx2_nic *pfvf, int qidx) otx2_qos_sqb_flush(pfvf, sq_idx); otx2_smq_flush(pfvf, otx2_get_smq_idx(pfvf, sq_idx)); + /* NIX/NPA NDC sync */ + otx2_qos_nix_npa_ndc_sync(pfvf); otx2_cleanup_tx_cqes(pfvf, cq); mutex_lock(&pfvf->mbox.lock); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/rep.c b/drivers/net/ethernet/marvell/octeontx2/nic/rep.c index 7153a71dfc86..2cd3da3b6843 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/rep.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/rep.c @@ -765,7 +765,7 @@ static int rvu_rep_probe(struct pci_dev *pdev, const struct pci_device_id *id) return err; } - err = 
pci_request_regions(pdev, DRV_NAME); + err = pcim_request_all_regions(pdev, DRV_NAME); if (err) { dev_err(dev, "PCI request regions failed 0x%x\n", err); return err; @@ -774,7 +774,7 @@ static int rvu_rep_probe(struct pci_dev *pdev, const struct pci_device_id *id) err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); if (err) { dev_err(dev, "DMA mask config failed, abort\n"); - goto err_release_regions; + goto err_set_drv_data; } pci_set_master(pdev); @@ -782,7 +782,7 @@ static int rvu_rep_probe(struct pci_dev *pdev, const struct pci_device_id *id) priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) { err = -ENOMEM; - goto err_release_regions; + goto err_set_drv_data; } pci_set_drvdata(pdev, priv); @@ -799,7 +799,7 @@ static int rvu_rep_probe(struct pci_dev *pdev, const struct pci_device_id *id) err = otx2_init_rsrc(pdev, priv); if (err) - goto err_release_regions; + goto err_set_drv_data; priv->iommu_domain = iommu_get_domain_for_dev(dev); @@ -822,9 +822,8 @@ err_detach_rsrc: otx2_disable_mbox_intr(priv); otx2_pfaf_mbox_destroy(priv); pci_free_irq_vectors(pdev); -err_release_regions: +err_set_drv_data: pci_set_drvdata(pdev, NULL); - pci_release_regions(pdev); return err; } @@ -844,7 +843,6 @@ static void rvu_rep_remove(struct pci_dev *pdev) otx2_pfaf_mbox_destroy(priv); pci_free_irq_vectors(priv->pdev); pci_set_drvdata(pdev, NULL); - pci_release_regions(pdev); } static struct pci_driver rvu_rep_driver = { diff --git a/drivers/net/ethernet/marvell/prestera/prestera_counter.c b/drivers/net/ethernet/marvell/prestera/prestera_counter.c index 4cd53a2dae46..634f4543c1d7 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_counter.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_counter.c @@ -336,8 +336,7 @@ prestera_counter_block_get_by_idx(struct prestera_counter *counter, u32 idx) static void prestera_counter_stats_work(struct work_struct *work) { - struct delayed_work *dl_work = - container_of(work, struct delayed_work, work); + struct delayed_work *dl_work = to_delayed_work(work); struct prestera_counter *counter = container_of(dl_work, struct prestera_counter, stats_dw); struct prestera_counter_block *block; diff --git a/drivers/net/ethernet/marvell/prestera/prestera_pci.c b/drivers/net/ethernet/marvell/prestera/prestera_pci.c index 35857dc19542..c45d108b2f6d 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_pci.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_pci.c @@ -845,9 +845,9 @@ static int prestera_pci_probe(struct pci_dev *pdev, goto err_pci_enable_device; } - err = pci_request_regions(pdev, driver_name); + err = pcim_request_all_regions(pdev, driver_name); if (err) { - dev_err(&pdev->dev, "pci_request_regions failed\n"); + dev_err(&pdev->dev, "pcim_request_all_regions failed\n"); goto err_pci_request_regions; } @@ -938,7 +938,6 @@ err_pci_dev_alloc: err_pp_ioremap: err_mem_ioremap: err_dma_mask: - pci_release_regions(pdev); err_pci_request_regions: err_pci_enable_device: return err; @@ -953,7 +952,6 @@ static void prestera_pci_remove(struct pci_dev *pdev) pci_free_irq_vectors(pdev); destroy_workqueue(fw->wq); prestera_fw_uninit(fw); - pci_release_regions(pdev); } static const struct pci_device_id prestera_pci_devices[] = { diff --git a/drivers/net/ethernet/mediatek/mtk_eth_path.c b/drivers/net/ethernet/mediatek/mtk_eth_path.c index 7c27a19c4d8f..b4c01e2878f6 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_path.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_path.c @@ -14,7 +14,7 @@ struct mtk_eth_muxc { const char *name; - int cap_bit; + 
u64 cap_bit; int (*set_path)(struct mtk_eth *eth, u64 path); }; @@ -31,6 +31,8 @@ static const char *mtk_eth_path_name(u64 path) return "gmac2_rgmii"; case MTK_ETH_PATH_GMAC2_SGMII: return "gmac2_sgmii"; + case MTK_ETH_PATH_GMAC2_2P5GPHY: + return "gmac2_2p5gphy"; case MTK_ETH_PATH_GMAC2_GEPHY: return "gmac2_gephy"; case MTK_ETH_PATH_GDM1_ESW: @@ -127,6 +129,29 @@ static int set_mux_u3_gmac2_to_qphy(struct mtk_eth *eth, u64 path) return 0; } +static int set_mux_gmac2_to_2p5gphy(struct mtk_eth *eth, u64 path) +{ + int ret; + + if (path == MTK_ETH_PATH_GMAC2_2P5GPHY) { + ret = regmap_clear_bits(eth->ethsys, ETHSYS_SYSCFG0, + SYSCFG0_SGMII_GMAC2_V2); + if (ret) + return ret; + + /* Setup mux to 2p5g PHY */ + ret = regmap_clear_bits(eth->infra, TOP_MISC_NETSYS_PCS_MUX, + MUX_G2_USXGMII_SEL); + if (ret) + return ret; + + dev_dbg(eth->dev, "path %s in %s updated\n", + mtk_eth_path_name(path), __func__); + } + + return 0; +} + static int set_mux_gmac1_gmac2_to_sgmii_rgmii(struct mtk_eth *eth, u64 path) { unsigned int val = 0; @@ -210,6 +235,10 @@ static const struct mtk_eth_muxc mtk_eth_muxc[] = { .cap_bit = MTK_ETH_MUX_U3_GMAC2_TO_QPHY, .set_path = set_mux_u3_gmac2_to_qphy, }, { + .name = "mux_gmac2_to_2p5gphy", + .cap_bit = MTK_ETH_MUX_GMAC2_TO_2P5GPHY, + .set_path = set_mux_gmac2_to_2p5gphy, + }, { .name = "mux_gmac1_gmac2_to_sgmii_rgmii", .cap_bit = MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII, .set_path = set_mux_gmac1_gmac2_to_sgmii_rgmii, @@ -260,6 +289,20 @@ int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id) return mtk_eth_mux_setup(eth, path); } +int mtk_gmac_2p5gphy_path_setup(struct mtk_eth *eth, int mac_id) +{ + u64 path = 0; + + if (mac_id == MTK_GMAC2_ID) + path = MTK_ETH_PATH_GMAC2_2P5GPHY; + + if (!path) + return -EINVAL; + + /* Setup proper MUXes along the path */ + return mtk_eth_mux_setup(eth, path); +} + int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id) { u64 path = 0; diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 6c92072b4c28..b38e4f2de674 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -503,7 +503,7 @@ static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, static void mtk_setup_bridge_switch(struct mtk_eth *eth) { /* Force Port1 XGMAC Link Up */ - mtk_m32(eth, 0, MTK_XGMAC_FORCE_LINK(MTK_GMAC1_ID), + mtk_m32(eth, 0, MTK_XGMAC_FORCE_MODE(MTK_GMAC1_ID), MTK_XGMAC_STS(MTK_GMAC1_ID)); /* Adjust GSW bridge IPG to 11 */ @@ -532,6 +532,26 @@ static struct phylink_pcs *mtk_mac_select_pcs(struct phylink_config *config, return NULL; } +static int mtk_mac_prepare(struct phylink_config *config, unsigned int mode, + phy_interface_t iface) +{ + struct mtk_mac *mac = container_of(config, struct mtk_mac, + phylink_config); + struct mtk_eth *eth = mac->hw; + + if (mtk_interface_mode_is_xgmii(eth, iface) && + mac->id != MTK_GMAC1_ID) { + mtk_m32(mac->hw, XMAC_MCR_TRX_DISABLE, + XMAC_MCR_TRX_DISABLE, MTK_XMAC_MCR(mac->id)); + + mtk_m32(mac->hw, MTK_XGMAC_FORCE_MODE(mac->id) | + MTK_XGMAC_FORCE_LINK(mac->id), + MTK_XGMAC_FORCE_MODE(mac->id), MTK_XGMAC_STS(mac->id)); + } + + return 0; +} + static void mtk_mac_config(struct phylink_config *config, unsigned int mode, const struct phylink_link_state *state) { @@ -573,6 +593,12 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode, } break; case PHY_INTERFACE_MODE_INTERNAL: + if (mac->id == MTK_GMAC2_ID && + MTK_HAS_CAPS(eth->soc->caps, MTK_2P5GPHY)) { + err = 
mtk_gmac_2p5gphy_path_setup(eth, mac->id); + if (err) + goto init_err; + } break; default: goto err_phy; @@ -644,12 +670,12 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode, } /* Setup gmac */ - if (mtk_is_netsys_v3_or_greater(eth) && - mac->interface == PHY_INTERFACE_MODE_INTERNAL) { + if (mtk_interface_mode_is_xgmii(eth, state->interface)) { mtk_w32(mac->hw, MTK_GDMA_XGDM_SEL, MTK_GDMA_EG_CTRL(mac->id)); mtk_w32(mac->hw, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(mac->id)); - mtk_setup_bridge_switch(eth); + if (mac->id == MTK_GMAC1_ID) + mtk_setup_bridge_switch(eth); } return; @@ -696,10 +722,19 @@ static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode, { struct mtk_mac *mac = container_of(config, struct mtk_mac, phylink_config); - u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id)); - mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN | MAC_MCR_FORCE_LINK); - mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id)); + if (!mtk_interface_mode_is_xgmii(mac->hw, interface)) { + /* GMAC modes */ + mtk_m32(mac->hw, + MAC_MCR_TX_EN | MAC_MCR_RX_EN | MAC_MCR_FORCE_LINK, 0, + MTK_MAC_MCR(mac->id)); + } else if (mac->id != MTK_GMAC1_ID) { + /* XGMAC except for built-in switch */ + mtk_m32(mac->hw, XMAC_MCR_TRX_DISABLE, XMAC_MCR_TRX_DISABLE, + MTK_XMAC_MCR(mac->id)); + mtk_m32(mac->hw, MTK_XGMAC_FORCE_LINK(mac->id), 0, + MTK_XGMAC_STS(mac->id)); + } } static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx, @@ -771,13 +806,12 @@ static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx, mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs); } -static void mtk_mac_link_up(struct phylink_config *config, - struct phy_device *phy, - unsigned int mode, phy_interface_t interface, - int speed, int duplex, bool tx_pause, bool rx_pause) +static void mtk_gdm_mac_link_up(struct mtk_mac *mac, + struct phy_device *phy, + unsigned int mode, phy_interface_t interface, + int speed, int duplex, bool tx_pause, + bool rx_pause) { - struct mtk_mac *mac = container_of(config, struct mtk_mac, - phylink_config); u32 mcr; mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id)); @@ -811,6 +845,56 @@ static void mtk_mac_link_up(struct phylink_config *config, mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id)); } +static void mtk_xgdm_mac_link_up(struct mtk_mac *mac, + struct phy_device *phy, + unsigned int mode, phy_interface_t interface, + int speed, int duplex, bool tx_pause, + bool rx_pause) +{ + u32 mcr; + + if (mac->id == MTK_GMAC1_ID) + return; + + /* Eliminate the interference(before link-up) caused by PHY noise */ + mtk_m32(mac->hw, XMAC_LOGIC_RST, 0, MTK_XMAC_LOGIC_RST(mac->id)); + mdelay(20); + mtk_m32(mac->hw, XMAC_GLB_CNTCLR, XMAC_GLB_CNTCLR, + MTK_XMAC_CNT_CTRL(mac->id)); + + mtk_m32(mac->hw, MTK_XGMAC_FORCE_LINK(mac->id), + MTK_XGMAC_FORCE_LINK(mac->id), MTK_XGMAC_STS(mac->id)); + + mcr = mtk_r32(mac->hw, MTK_XMAC_MCR(mac->id)); + mcr &= ~(XMAC_MCR_FORCE_TX_FC | XMAC_MCR_FORCE_RX_FC | + XMAC_MCR_TRX_DISABLE); + /* Configure pause modes - + * phylink will avoid these for half duplex + */ + if (tx_pause) + mcr |= XMAC_MCR_FORCE_TX_FC; + if (rx_pause) + mcr |= XMAC_MCR_FORCE_RX_FC; + + mtk_w32(mac->hw, mcr, MTK_XMAC_MCR(mac->id)); +} + +static void mtk_mac_link_up(struct phylink_config *config, + struct phy_device *phy, + unsigned int mode, phy_interface_t interface, + int speed, int duplex, bool tx_pause, bool rx_pause) +{ + struct mtk_mac *mac = container_of(config, struct mtk_mac, + phylink_config); + + if (mtk_interface_mode_is_xgmii(mac->hw, interface)) + mtk_xgdm_mac_link_up(mac, 
phy, mode, interface, speed, duplex, + tx_pause, rx_pause); + else + mtk_gdm_mac_link_up(mac, phy, mode, interface, speed, duplex, + tx_pause, rx_pause); +} + static void mtk_mac_disable_tx_lpi(struct phylink_config *config) { struct mtk_mac *mac = container_of(config, struct mtk_mac, @@ -828,6 +912,9 @@ static int mtk_mac_enable_tx_lpi(struct phylink_config *config, u32 timer, struct mtk_eth *eth = mac->hw; u32 val; + if (mtk_interface_mode_is_xgmii(eth, mac->interface)) + return -EOPNOTSUPP; + /* Tx idle timer in ms */ timer = DIV_ROUND_UP(timer, 1000); @@ -858,6 +945,7 @@ static int mtk_mac_enable_tx_lpi(struct phylink_config *config, u32 timer, } static const struct phylink_mac_ops mtk_phylink_ops = { + .mac_prepare = mtk_mac_prepare, .mac_select_pcs = mtk_mac_select_pcs, .mac_config = mtk_mac_config, .mac_finish = mtk_mac_finish, @@ -4768,6 +4856,11 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) mac->phylink = phylink; + if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_2P5GPHY) && + id == MTK_GMAC2_ID) + __set_bit(PHY_INTERFACE_MODE_INTERNAL, + mac->phylink_config.supported_interfaces); + SET_NETDEV_DEV(eth->netdev[id], eth->dev); eth->netdev[id]->watchdog_timeo = 5 * HZ; eth->netdev[id]->netdev_ops = &mtk_netdev_ops; diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index 88ef2e9c50fc..6f72a8c8ae1e 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -431,7 +431,8 @@ /* XMAC status registers */ #define MTK_XGMAC_STS(x) (((x) == MTK_GMAC3_ID) ? 0x1001C : 0x1000C) -#define MTK_XGMAC_FORCE_LINK(x) (((x) == MTK_GMAC2_ID) ? BIT(31) : BIT(15)) +#define MTK_XGMAC_FORCE_MODE(x) (((x) == MTK_GMAC2_ID) ? BIT(31) : BIT(15)) +#define MTK_XGMAC_FORCE_LINK(x) (((x) == MTK_GMAC2_ID) ? 
BIT(27) : BIT(11)) #define MTK_USXGMII_PCS_LINK BIT(8) #define MTK_XGMAC_RX_FC BIT(5) #define MTK_XGMAC_TX_FC BIT(4) @@ -524,6 +525,21 @@ #define INTF_MODE_RGMII_1000 (TRGMII_MODE | TRGMII_CENTRAL_ALIGNED) #define INTF_MODE_RGMII_10_100 0 +/* XFI Mac control registers */ +#define MTK_XMAC_BASE(x) (0x12000 + (((x) - 1) * 0x1000)) +#define MTK_XMAC_MCR(x) (MTK_XMAC_BASE(x)) +#define XMAC_MCR_TRX_DISABLE 0xf +#define XMAC_MCR_FORCE_TX_FC BIT(5) +#define XMAC_MCR_FORCE_RX_FC BIT(4) + +/* XFI Mac logic reset registers */ +#define MTK_XMAC_LOGIC_RST(x) (MTK_XMAC_BASE(x) + 0x10) +#define XMAC_LOGIC_RST BIT(0) + +/* XFI Mac count global control */ +#define MTK_XMAC_CNT_CTRL(x) (MTK_XMAC_BASE(x) + 0x100) +#define XMAC_GLB_CNTCLR BIT(0) + /* GPIO port control registers for GMAC 2*/ #define GPIO_OD33_CTRL8 0x4c0 #define GPIO_BIAS_CTRL 0xed0 @@ -587,6 +603,10 @@ #define GEPHY_MAC_SEL BIT(1) /* Top misc registers */ +#define TOP_MISC_NETSYS_PCS_MUX 0x0 +#define NETSYS_PCS_MUX_MASK GENMASK(1, 0) +#define MUX_G2_USXGMII_SEL BIT(1) + #define USB_PHY_SWITCH_REG 0x218 #define QPHY_SEL_MASK GENMASK(1, 0) #define SGMII_QPHY_SEL 0x2 @@ -951,6 +971,7 @@ enum mkt_eth_capabilities { MTK_RGMII_BIT = 0, MTK_TRGMII_BIT, MTK_SGMII_BIT, + MTK_2P5GPHY_BIT, MTK_ESW_BIT, MTK_GEPHY_BIT, MTK_MUX_BIT, @@ -971,6 +992,7 @@ enum mkt_eth_capabilities { MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT, MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY_BIT, MTK_ETH_MUX_U3_GMAC2_TO_QPHY_BIT, + MTK_ETH_MUX_GMAC2_TO_2P5GPHY_BIT, MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII_BIT, MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII_BIT, @@ -980,6 +1002,7 @@ enum mkt_eth_capabilities { MTK_ETH_PATH_GMAC1_SGMII_BIT, MTK_ETH_PATH_GMAC2_RGMII_BIT, MTK_ETH_PATH_GMAC2_SGMII_BIT, + MTK_ETH_PATH_GMAC2_2P5GPHY_BIT, MTK_ETH_PATH_GMAC2_GEPHY_BIT, MTK_ETH_PATH_GDM1_ESW_BIT, }; @@ -988,6 +1011,7 @@ enum mkt_eth_capabilities { #define MTK_RGMII BIT_ULL(MTK_RGMII_BIT) #define MTK_TRGMII BIT_ULL(MTK_TRGMII_BIT) #define MTK_SGMII BIT_ULL(MTK_SGMII_BIT) +#define MTK_2P5GPHY BIT_ULL(MTK_2P5GPHY_BIT) #define MTK_ESW BIT_ULL(MTK_ESW_BIT) #define MTK_GEPHY BIT_ULL(MTK_GEPHY_BIT) #define MTK_MUX BIT_ULL(MTK_MUX_BIT) @@ -1010,6 +1034,8 @@ enum mkt_eth_capabilities { BIT_ULL(MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY_BIT) #define MTK_ETH_MUX_U3_GMAC2_TO_QPHY \ BIT_ULL(MTK_ETH_MUX_U3_GMAC2_TO_QPHY_BIT) +#define MTK_ETH_MUX_GMAC2_TO_2P5GPHY \ + BIT_ULL(MTK_ETH_MUX_GMAC2_TO_2P5GPHY_BIT) #define MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII \ BIT_ULL(MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII_BIT) #define MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII \ @@ -1021,6 +1047,7 @@ enum mkt_eth_capabilities { #define MTK_ETH_PATH_GMAC1_SGMII BIT_ULL(MTK_ETH_PATH_GMAC1_SGMII_BIT) #define MTK_ETH_PATH_GMAC2_RGMII BIT_ULL(MTK_ETH_PATH_GMAC2_RGMII_BIT) #define MTK_ETH_PATH_GMAC2_SGMII BIT_ULL(MTK_ETH_PATH_GMAC2_SGMII_BIT) +#define MTK_ETH_PATH_GMAC2_2P5GPHY BIT_ULL(MTK_ETH_PATH_GMAC2_2P5GPHY_BIT) #define MTK_ETH_PATH_GMAC2_GEPHY BIT_ULL(MTK_ETH_PATH_GMAC2_GEPHY_BIT) #define MTK_ETH_PATH_GDM1_ESW BIT_ULL(MTK_ETH_PATH_GDM1_ESW_BIT) @@ -1030,6 +1057,7 @@ enum mkt_eth_capabilities { #define MTK_GMAC2_RGMII (MTK_ETH_PATH_GMAC2_RGMII | MTK_RGMII) #define MTK_GMAC2_SGMII (MTK_ETH_PATH_GMAC2_SGMII | MTK_SGMII) #define MTK_GMAC2_GEPHY (MTK_ETH_PATH_GMAC2_GEPHY | MTK_GEPHY) +#define MTK_GMAC2_2P5GPHY (MTK_ETH_PATH_GMAC2_2P5GPHY | MTK_2P5GPHY) #define MTK_GDM1_ESW (MTK_ETH_PATH_GDM1_ESW | MTK_ESW) /* MUXes present on SoCs */ @@ -1049,6 +1077,10 @@ enum mkt_eth_capabilities { (MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII | MTK_MUX | \ MTK_SHARED_SGMII) +/* 2: GMAC2 -> 2P5GPHY */ 
+#define MTK_MUX_GMAC2_TO_2P5GPHY \ + (MTK_ETH_MUX_GMAC2_TO_2P5GPHY | MTK_MUX | MTK_INFRA) + /* 0: GMACx -> GEPHY, 1: GMACx -> SGMII where x is 1 or 2 */ #define MTK_MUX_GMAC12_TO_GEPHY_SGMII \ (MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII | MTK_MUX) @@ -1084,8 +1116,9 @@ enum mkt_eth_capabilities { MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \ MTK_RSTCTRL_PPE1 | MTK_SRAM) -#define MT7988_CAPS (MTK_36BIT_DMA | MTK_GDM1_ESW | MTK_QDMA | \ - MTK_RSTCTRL_PPE1 | MTK_RSTCTRL_PPE2 | MTK_SRAM) +#define MT7988_CAPS (MTK_36BIT_DMA | MTK_GDM1_ESW | MTK_GMAC2_2P5GPHY | \ + MTK_MUX_GMAC2_TO_2P5GPHY | MTK_QDMA | MTK_RSTCTRL_PPE1 | \ + MTK_RSTCTRL_PPE2 | MTK_SRAM) struct mtk_tx_dma_desc_info { dma_addr_t addr; @@ -1145,7 +1178,7 @@ struct mtk_reg_map { }; /* struct mtk_eth_data - This is the structure holding all differences - * among various plaforms + * among various platforms * @reg_map Soc register map. * @ana_rgc3: The offset for register ANA_RGC3 related to * sgmiisys syscon @@ -1245,7 +1278,7 @@ struct mtk_soc_data { * @mii_bus: If there is a bus we need to create an instance for it * @pending_work: The workqueue used to reset the dma ring * @state: Initialization and runtime state of the device - * @soc: Holding specific data among vaious SoCs + * @soc: Holding specific data among various SoCs */ struct mtk_eth { @@ -1437,6 +1470,23 @@ static inline u32 mtk_get_ib2_multicast_mask(struct mtk_eth *eth) return MTK_FOE_IB2_MULTICAST; } +static inline bool mtk_interface_mode_is_xgmii(struct mtk_eth *eth, + phy_interface_t interface) +{ + if (!mtk_is_netsys_v3_or_greater(eth)) + return false; + + switch (interface) { + case PHY_INTERFACE_MODE_INTERNAL: + case PHY_INTERFACE_MODE_USXGMII: + case PHY_INTERFACE_MODE_10GBASER: + case PHY_INTERFACE_MODE_5GBASER: + return true; + default: + return false; + } +} + /* read the hardware status register */ void mtk_stats_update_mac(struct mtk_mac *mac); @@ -1445,6 +1495,7 @@ u32 mtk_r32(struct mtk_eth *eth, unsigned reg); u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned int reg); int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id); +int mtk_gmac_2p5gphy_path_setup(struct mtk_eth *eth, int mac_id); int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id); int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id); diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c index e212a4ba9275..351dd152f4f3 100644 --- a/drivers/net/ethernet/mediatek/mtk_wed.c +++ b/drivers/net/ethernet/mediatek/mtk_wed.c @@ -2000,7 +2000,7 @@ mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask) if (mtk_wed_is_v3_or_greater(dev->hw)) wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_TKID_ALI_EN); - /* initail tx interrupt trigger */ + /* initial tx interrupt trigger */ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX, MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN | MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR | @@ -2011,7 +2011,7 @@ mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask) FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG, dev->wlan.tx_tbit[1])); - /* initail txfree interrupt trigger */ + /* initial txfree interrupt trigger */ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX_FREE, MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN | MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_CLR | diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index cd17a3f4faf8..a68cd3f0304c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -1897,6 +1897,7 @@ 
static int mlx4_en_get_ts_info(struct net_device *dev, if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) { info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_TX_SOFTWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c index d7444782bfdd..698a5d1f0d7e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mr.c +++ b/drivers/net/ethernet/mellanox/mlx4/mr.c @@ -106,7 +106,7 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order) buddy->max_order = max_order; spin_lock_init(&buddy->lock); - buddy->bits = kcalloc(buddy->max_order + 1, sizeof(long *), + buddy->bits = kcalloc(buddy->max_order + 1, sizeof(*buddy->bits), GFP_KERNEL); buddy->num_free = kcalloc(buddy->max_order + 1, sizeof(*buddy->num_free), GFP_KERNEL); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 568bbe5f83f5..d292e6a9e22c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -154,7 +154,8 @@ mlx5_core-$(CONFIG_MLX5_HW_STEERING) += steering/hws/cmd.o \ steering/hws/vport.o \ steering/hws/bwc_complex.o \ steering/hws/fs_hws_pools.o \ - steering/hws/fs_hws.o + steering/hws/fs_hws.o \ + steering/hws/action_ste_pool.o # # SF device diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index e53dbdc0a7a1..b1aeea7c4a91 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -927,8 +927,7 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force static void cb_timeout_handler(struct work_struct *work) { - struct delayed_work *dwork = container_of(work, struct delayed_work, - work); + struct delayed_work *dwork = to_delayed_work(work); struct mlx5_cmd_work_ent *ent = container_of(dwork, struct mlx5_cmd_work_ent, cb_timeout_work); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 32ed4963b8ad..5b0d03b3efe8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -520,6 +520,12 @@ struct mlx5e_xdpsq { struct mlx5e_channel *channel; } ____cacheline_aligned_in_smp; +struct mlx5e_xdp_buff { + struct xdp_buff xdp; + struct mlx5_cqe64 *cqe; + struct mlx5e_rq *rq; +}; + struct mlx5e_ktls_resync_resp; struct mlx5e_icosq { @@ -716,6 +722,7 @@ struct mlx5e_rq { struct mlx5e_xdpsq *xdpsq; DECLARE_BITMAP(flags, 8); struct page_pool *page_pool; + struct mlx5e_xdp_buff mxbuf; /* AF_XDP zero-copy */ struct xsk_buff_pool *xsk_pool; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c index 81523825faa2..cb972b2d46e2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c @@ -114,6 +114,7 @@ int mlx5e_health_recover_channels(struct mlx5e_priv *priv) int err = 0; rtnl_lock(); + netdev_lock(priv->netdev); mutex_lock(&priv->state_lock); if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) @@ -123,6 +124,7 @@ int mlx5e_health_recover_channels(struct mlx5e_priv *priv) out: mutex_unlock(&priv->state_lock); + netdev_unlock(priv->netdev); rtnl_unlock(); return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c index 131ed97ca997..5d0014129a7e 
100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c @@ -8,6 +8,7 @@ #include "en/fs_tt_redirect.h" #include <linux/list.h> #include <linux/spinlock.h> +#include <net/netdev_lock.h> struct mlx5e_ptp_fs { struct mlx5_flow_handle *l2_rule; @@ -449,8 +450,22 @@ static void mlx5e_ptpsq_unhealthy_work(struct work_struct *work) { struct mlx5e_ptpsq *ptpsq = container_of(work, struct mlx5e_ptpsq, report_unhealthy_work); + struct mlx5e_txqsq *sq = &ptpsq->txqsq; + + /* Recovering the PTP SQ means re-enabling NAPI, which requires the + * netdev instance lock. However, SQ closing has to wait for this work + * task to finish while also holding the same lock. So either get the + * lock or find that the SQ is no longer enabled and thus this work is + * not relevant anymore. + */ + while (!netdev_trylock(sq->netdev)) { + if (!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)) + return; + msleep(20); + } mlx5e_reporter_tx_ptpsq_unhealthy(ptpsq); + netdev_unlock(sq->netdev); } static int mlx5e_ptp_open_txqsq(struct mlx5e_ptp *c, u32 tisn, @@ -892,7 +907,7 @@ int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params, if (err) goto err_free; - netif_napi_add(netdev, &c->napi, mlx5e_ptp_napi_poll); + netif_napi_add_locked(netdev, &c->napi, mlx5e_ptp_napi_poll); mlx5e_ptp_build_params(c, cparams, params); @@ -910,7 +925,7 @@ int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params, return 0; err_napi_del: - netif_napi_del(&c->napi); + netif_napi_del_locked(&c->napi); err_free: kvfree(cparams); kvfree(c); @@ -920,7 +935,7 @@ err_free: void mlx5e_ptp_close(struct mlx5e_ptp *c) { mlx5e_ptp_close_queues(c); - netif_napi_del(&c->napi); + netif_napi_del_locked(&c->napi); kvfree(c); } @@ -929,7 +944,7 @@ void mlx5e_ptp_activate_channel(struct mlx5e_ptp *c) { int tc; - napi_enable(&c->napi); + napi_enable_locked(&c->napi); if (test_bit(MLX5E_PTP_STATE_TX, c->state)) { for (tc = 0; tc < c->num_tc; tc++) @@ -957,7 +972,7 @@ void mlx5e_ptp_deactivate_channel(struct mlx5e_ptp *c) mlx5e_deactivate_txqsq(&c->ptpsq[tc].txqsq); } - napi_disable(&c->napi); + napi_disable_locked(&c->napi); } int mlx5e_ptp_get_rqn(struct mlx5e_ptp *c, u32 *rqn) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c index dbd9482359e1..c3bda4612fa9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c @@ -107,9 +107,7 @@ static int mlx5e_tx_reporter_err_cqe_recover(void *ctx) mlx5e_reset_txqsq_cc_pc(sq); sq->stats->recover++; clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state); - rtnl_lock(); mlx5e_activate_txqsq(sq); - rtnl_unlock(); if (sq->channel) mlx5e_trigger_napi_icosq(sq->channel); @@ -176,7 +174,6 @@ static int mlx5e_tx_reporter_ptpsq_unhealthy_recover(void *ctx) priv = ptpsq->txqsq.priv; - rtnl_lock(); mutex_lock(&priv->state_lock); chs = &priv->channels; netdev = priv->netdev; @@ -196,7 +193,6 @@ static int mlx5e_tx_reporter_ptpsq_unhealthy_recover(void *ctx) netif_carrier_on(netdev); mutex_unlock(&priv->state_lock); - rtnl_unlock(); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c index 140606fcd23b..b5c19396e096 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c @@ -149,7 +149,7 @@ static struct mlx5e_trap *mlx5e_open_trap(struct mlx5e_priv *priv) 
t->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey); t->stats = &priv->trap_stats.ch; - netif_napi_add(netdev, &t->napi, mlx5e_trap_napi_poll); + netif_napi_add_locked(netdev, &t->napi, mlx5e_trap_napi_poll); err = mlx5e_open_trap_rq(priv, t); if (unlikely(err)) @@ -164,7 +164,7 @@ static struct mlx5e_trap *mlx5e_open_trap(struct mlx5e_priv *priv) err_close_trap_rq: mlx5e_close_trap_rq(&t->rq); err_napi_del: - netif_napi_del(&t->napi); + netif_napi_del_locked(&t->napi); kvfree(t); return ERR_PTR(err); } @@ -173,13 +173,13 @@ void mlx5e_close_trap(struct mlx5e_trap *trap) { mlx5e_tir_destroy(&trap->tir); mlx5e_close_trap_rq(&trap->rq); - netif_napi_del(&trap->napi); + netif_napi_del_locked(&trap->napi); kvfree(trap); } static void mlx5e_activate_trap(struct mlx5e_trap *trap) { - napi_enable(&trap->napi); + napi_enable_locked(&trap->napi); mlx5e_activate_rq(&trap->rq); mlx5e_trigger_napi_sched(&trap->napi); } @@ -189,7 +189,7 @@ void mlx5e_deactivate_trap(struct mlx5e_priv *priv) struct mlx5e_trap *trap = priv->en_trap; mlx5e_deactivate_rq(&trap->rq); - napi_disable(&trap->napi); + napi_disable_locked(&trap->napi); } static struct mlx5e_trap *mlx5e_add_trap_queue(struct mlx5e_priv *priv) @@ -285,6 +285,7 @@ int mlx5e_handle_trap_event(struct mlx5e_priv *priv, struct mlx5_trap_ctx *trap_ if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) return 0; + netdev_lock(priv->netdev); switch (trap_ctx->action) { case DEVLINK_TRAP_ACTION_TRAP: err = mlx5e_handle_action_trap(priv, trap_ctx->id); @@ -297,6 +298,7 @@ int mlx5e_handle_trap_event(struct mlx5e_priv *priv, struct mlx5_trap_ctx *trap_ trap_ctx->action); err = -EINVAL; } + netdev_unlock(priv->netdev); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c index f803e1c93590..5ce1b463b7a8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c @@ -707,8 +707,8 @@ static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq, xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo); page = xdpi.page.page; - /* No need to check ((page->pp_magic & ~0x3UL) == PP_SIGNATURE) - * as we know this is a page_pool page. + /* No need to check page_pool_page_is_pp() as we + * know this is a page_pool page. */ page_pool_recycle_direct(page->pp, page); } while (++n < num); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h index 446e492c6bb8..46ab0a9e8cdd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h @@ -45,12 +45,6 @@ (MLX5E_XDP_INLINE_WQE_MAX_DS_CNT * MLX5_SEND_WQE_DS - \ sizeof(struct mlx5_wqe_inline_seg)) -struct mlx5e_xdp_buff { - struct xdp_buff xdp; - struct mlx5_cqe64 *cqe; - struct mlx5e_rq *rq; -}; - /* XDP packets can be transmitted in different ways. On completion, we need to * distinguish between them to clean up things in a proper way. 
*/ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c index 2dd842aac6fc..77f61cd28a79 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c @@ -259,8 +259,7 @@ static void mlx5e_ipsec_init_macs(struct mlx5e_ipsec_sa_entry *sa_entry, struct mlx5_accel_esp_xfrm_attrs *attrs) { struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry); - struct xfrm_state *x = sa_entry->x; - struct net_device *netdev; + struct net_device *netdev = sa_entry->dev; struct neighbour *n; u8 addr[ETH_ALEN]; const void *pkey; @@ -270,8 +269,6 @@ static void mlx5e_ipsec_init_macs(struct mlx5e_ipsec_sa_entry *sa_entry, attrs->type != XFRM_DEV_OFFLOAD_PACKET) return; - netdev = x->xso.real_dev; - mlx5_query_mac_address(mdev, addr); switch (attrs->dir) { case XFRM_DEV_OFFLOAD_IN: @@ -692,17 +689,17 @@ static int mlx5e_ipsec_create_dwork(struct mlx5e_ipsec_sa_entry *sa_entry) return 0; } -static int mlx5e_xfrm_add_state(struct xfrm_state *x, +static int mlx5e_xfrm_add_state(struct net_device *dev, + struct xfrm_state *x, struct netlink_ext_ack *extack) { struct mlx5e_ipsec_sa_entry *sa_entry = NULL; - struct net_device *netdev = x->xso.real_dev; struct mlx5e_ipsec *ipsec; struct mlx5e_priv *priv; gfp_t gfp; int err; - priv = netdev_priv(netdev); + priv = netdev_priv(dev); if (!priv->ipsec) return -EOPNOTSUPP; @@ -713,6 +710,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x, return -ENOMEM; sa_entry->x = x; + sa_entry->dev = dev; sa_entry->ipsec = ipsec; /* Check if this SA is originated from acquire flow temporary SA */ if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ) @@ -809,7 +807,7 @@ err_xfrm: return err; } -static void mlx5e_xfrm_del_state(struct xfrm_state *x) +static void mlx5e_xfrm_del_state(struct net_device *dev, struct xfrm_state *x) { struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x); struct mlx5e_ipsec *ipsec = sa_entry->ipsec; @@ -822,7 +820,7 @@ static void mlx5e_xfrm_del_state(struct xfrm_state *x) WARN_ON(old != sa_entry); } -static void mlx5e_xfrm_free_state(struct xfrm_state *x) +static void mlx5e_xfrm_free_state(struct net_device *dev, struct xfrm_state *x) { struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x); struct mlx5e_ipsec *ipsec = sa_entry->ipsec; @@ -855,8 +853,6 @@ static int mlx5e_ipsec_netevent_event(struct notifier_block *nb, struct mlx5e_ipsec_sa_entry *sa_entry; struct mlx5e_ipsec *ipsec; struct neighbour *n = ptr; - struct net_device *netdev; - struct xfrm_state *x; unsigned long idx; if (event != NETEVENT_NEIGH_UPDATE || !(n->nud_state & NUD_VALID)) @@ -876,11 +872,9 @@ static int mlx5e_ipsec_netevent_event(struct notifier_block *nb, continue; } - x = sa_entry->x; - netdev = x->xso.real_dev; data = sa_entry->work->data; - neigh_ha_snapshot(data->addr, n, netdev); + neigh_ha_snapshot(data->addr, n, sa_entry->dev); queue_work(ipsec->wq, &sa_entry->work->work); } @@ -996,8 +990,8 @@ static void mlx5e_xfrm_update_stats(struct xfrm_state *x) size_t headers; lockdep_assert(lockdep_is_held(&x->lock) || - lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_cfg_mutex) || - lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_state_lock)); + lockdep_is_held(&net->xfrm.xfrm_cfg_mutex) || + lockdep_is_held(&net->xfrm.xfrm_state_lock)); if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ) return; @@ -1170,7 +1164,7 @@ mlx5e_ipsec_build_accel_pol_attrs(struct mlx5e_ipsec_pol_entry *pol_entry, static int 
mlx5e_xfrm_add_policy(struct xfrm_policy *x, struct netlink_ext_ack *extack) { - struct net_device *netdev = x->xdo.real_dev; + struct net_device *netdev = x->xdo.dev; struct mlx5e_ipsec_pol_entry *pol_entry; struct mlx5e_priv *priv; int err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h index a63c2289f8af..ffcd0cdeb775 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h @@ -274,6 +274,7 @@ struct mlx5e_ipsec_limits { struct mlx5e_ipsec_sa_entry { struct mlx5e_ipsec_esn_state esn_state; struct xfrm_state *x; + struct net_device *dev; struct mlx5e_ipsec *ipsec; struct mlx5_accel_esp_xfrm_attrs attrs; void (*set_iv_op)(struct sk_buff *skb, struct xfrm_state *x, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c index 8705cffc747f..5fe016e477b3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c @@ -1147,6 +1147,7 @@ static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state) bool reset = true; int err; + netdev_lock(priv->netdev); mutex_lock(&priv->state_lock); new_params = priv->channels.params; @@ -1162,6 +1163,7 @@ static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state) &trust_state, reset); mutex_unlock(&priv->state_lock); + netdev_unlock(priv->netdev); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index fdf9e9bb99ac..ea078c9f5d15 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -1689,6 +1689,7 @@ int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv, return 0; info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_TX_SOFTWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; @@ -2059,14 +2060,9 @@ int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv, if (err) return err; - dev_hold(dev); - rtnl_unlock(); - err = mlx5_firmware_flash(mdev, fw, NULL); release_firmware(fw); - rtnl_lock(); - dev_put(dev); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index 05058710d2c7..04a969128161 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -484,7 +484,9 @@ static int mlx5e_vlan_rx_add_svid(struct mlx5e_flow_steering *fs, } /* Need to fix some features.. 
*/ + netdev_lock(netdev); netdev_update_features(netdev); + netdev_unlock(netdev); return err; } @@ -521,7 +523,9 @@ int mlx5e_fs_vlan_rx_kill_vid(struct mlx5e_flow_steering *fs, } else if (be16_to_cpu(proto) == ETH_P_8021AD) { clear_bit(vid, fs->vlan->active_svlans); mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid); + netdev_lock(netdev); netdev_update_features(netdev); + netdev_unlock(netdev); } return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 9bd166f489e7..ea822c69d137 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -39,6 +39,7 @@ #include <linux/debugfs.h> #include <linux/if_bridge.h> #include <linux/filter.h> +#include <net/netdev_lock.h> #include <net/netdev_queues.h> #include <net/page_pool/types.h> #include <net/pkt_sched.h> @@ -1903,7 +1904,20 @@ void mlx5e_tx_err_cqe_work(struct work_struct *recover_work) struct mlx5e_txqsq *sq = container_of(recover_work, struct mlx5e_txqsq, recover_work); + /* Recovering queues means re-enabling NAPI, which requires the netdev + * instance lock. However, SQ closing flows have to wait for work tasks + * to finish while also holding the netdev instance lock. So either get + * the lock or find that the SQ is no longer enabled and thus this work + * is not relevant anymore. + */ + while (!netdev_trylock(sq->netdev)) { + if (!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)) + return; + msleep(20); + } + mlx5e_reporter_tx_err_cqe(sq); + netdev_unlock(sq->netdev); } static struct dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode) @@ -2705,8 +2719,8 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, c->aff_mask = irq_get_effective_affinity_mask(irq); c->lag_port = mlx5e_enumerate_lag_port(mdev, ix); - netif_napi_add_config(netdev, &c->napi, mlx5e_napi_poll, ix); - netif_napi_set_irq(&c->napi, irq); + netif_napi_add_config_locked(netdev, &c->napi, mlx5e_napi_poll, ix); + netif_napi_set_irq_locked(&c->napi, irq); err = mlx5e_open_queues(c, params, cparam); if (unlikely(err)) @@ -2728,7 +2742,7 @@ err_close_queues: mlx5e_close_queues(c); err_napi_del: - netif_napi_del(&c->napi); + netif_napi_del_locked(&c->napi); err_free: kvfree(cparam); @@ -2741,7 +2755,7 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c) { int tc; - napi_enable(&c->napi); + napi_enable_locked(&c->napi); for (tc = 0; tc < c->num_tc; tc++) mlx5e_activate_txqsq(&c->sq[tc]); @@ -2773,7 +2787,7 @@ static void mlx5e_deactivate_channel(struct mlx5e_channel *c) mlx5e_deactivate_txqsq(&c->sq[tc]); mlx5e_qos_deactivate_queues(c); - napi_disable(&c->napi); + napi_disable_locked(&c->napi); } static void mlx5e_close_channel(struct mlx5e_channel *c) @@ -2782,7 +2796,7 @@ static void mlx5e_close_channel(struct mlx5e_channel *c) mlx5e_close_xsk(c); mlx5e_close_queues(c); mlx5e_qos_close_queues(c); - netif_napi_del(&c->napi); + netif_napi_del_locked(&c->napi); kvfree(c); } @@ -4276,7 +4290,7 @@ void mlx5e_set_xdp_feature(struct net_device *netdev) if (!netdev->netdev_ops->ndo_bpf || params->packet_merge.type != MLX5E_PACKET_MERGE_NONE) { - xdp_clear_features_flag(netdev); + xdp_set_features_flag_locked(netdev, 0); return; } @@ -4285,7 +4299,7 @@ void mlx5e_set_xdp_feature(struct net_device *netdev) NETDEV_XDP_ACT_RX_SG | NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_NDO_XMIT_SG; - xdp_set_features_flag(netdev, val); + xdp_set_features_flag_locked(netdev, val); } int mlx5e_set_features(struct net_device 
*netdev, netdev_features_t features) @@ -4968,21 +4982,19 @@ static void mlx5e_tx_timeout_work(struct work_struct *work) struct net_device *netdev = priv->netdev; int i; - /* Take rtnl_lock to ensure no change in netdev->real_num_tx_queues - * through this flow. However, channel closing flows have to wait for - * this work to finish while holding rtnl lock too. So either get the - * lock or find that channels are being closed for other reason and - * this work is not relevant anymore. + /* Recovering the TX queues implies re-enabling NAPI, which requires + * the netdev instance lock. + * However, channel closing flows have to wait for this work to finish + * while holding the same lock. So either get the lock or find that + * channels are being closed for other reason and this work is not + * relevant anymore. */ - while (!rtnl_trylock()) { + while (!netdev_trylock(netdev)) { if (!test_bit(MLX5E_STATE_CHANNELS_ACTIVE, &priv->state)) return; msleep(20); } - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) - goto unlock; - for (i = 0; i < netdev->real_num_tx_queues; i++) { struct netdev_queue *dev_queue = netdev_get_tx_queue(netdev, i); @@ -4996,8 +5008,7 @@ static void mlx5e_tx_timeout_work(struct work_struct *work) break; } -unlock: - rtnl_unlock(); + netdev_unlock(netdev); } static void mlx5e_tx_timeout(struct net_device *dev, unsigned int txqueue) @@ -5321,7 +5332,6 @@ static void mlx5e_get_queue_stats_rx(struct net_device *dev, int i, struct mlx5e_rq_stats *xskrq_stats; struct mlx5e_rq_stats *rq_stats; - ASSERT_RTNL(); if (mlx5e_is_uplink_rep(priv) || !priv->stats_nch) return; @@ -5341,7 +5351,6 @@ static void mlx5e_get_queue_stats_tx(struct net_device *dev, int i, struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5e_sq_stats *sq_stats; - ASSERT_RTNL(); if (!priv->stats_nch) return; @@ -5362,7 +5371,6 @@ static void mlx5e_get_base_stats(struct net_device *dev, struct mlx5e_ptp *ptp_channel; int i, tc; - ASSERT_RTNL(); if (!mlx5e_is_uplink_rep(priv)) { rx->packets = 0; rx->bytes = 0; @@ -5458,6 +5466,8 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) netdev->netdev_ops = &mlx5e_netdev_ops; netdev->xdp_metadata_ops = &mlx5e_xdp_metadata_ops; netdev->xsk_tx_metadata_ops = &mlx5e_xsk_tx_metadata_ops; + netdev->request_ops_lock = true; + netdev_lockdep_set_classes(netdev); mlx5e_dcbnl_build_netdev(netdev); @@ -5839,9 +5849,11 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv) mlx5e_nic_set_rx_mode(priv); rtnl_lock(); + netdev_lock(netdev); if (netif_running(netdev)) mlx5e_open(netdev); udp_tunnel_nic_reset_ntf(priv->netdev); + netdev_unlock(netdev); netif_device_attach(netdev); rtnl_unlock(); } @@ -5854,9 +5866,16 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv) mlx5e_dcbnl_delete_app(priv); rtnl_lock(); + netdev_lock(priv->netdev); if (netif_running(priv->netdev)) mlx5e_close(priv->netdev); netif_device_detach(priv->netdev); + if (priv->en_trap) { + mlx5e_deactivate_trap(priv); + mlx5e_close_trap(priv->en_trap); + priv->en_trap = NULL; + } + netdev_unlock(priv->netdev); rtnl_unlock(); mlx5e_nic_set_rx_mode(priv); @@ -5866,11 +5885,6 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv) mlx5e_monitor_counter_cleanup(priv); mlx5e_disable_blocking_events(priv); - if (priv->en_trap) { - mlx5e_deactivate_trap(priv); - mlx5e_close_trap(priv->en_trap); - priv->en_trap = NULL; - } mlx5e_disable_async_events(priv); mlx5_lag_remove_netdev(mdev, priv->netdev); mlx5_vxlan_reset_to_default(mdev->vxlan); @@ -6125,7 +6139,9 @@ static void mlx5e_update_features(struct 
net_device *netdev) return; /* features will be updated on netdev registration */ rtnl_lock(); + netdev_lock(netdev); netdev_update_features(netdev); + netdev_unlock(netdev); rtnl_unlock(); } @@ -6136,7 +6152,7 @@ static void mlx5e_reset_channels(struct net_device *netdev) int mlx5e_attach_netdev(struct mlx5e_priv *priv) { - const bool take_rtnl = priv->netdev->reg_state == NETREG_REGISTERED; + const bool need_lock = priv->netdev->reg_state == NETREG_REGISTERED; const struct mlx5e_profile *profile = priv->profile; int max_nch; int err; @@ -6178,15 +6194,19 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv) * 2. Set our default XPS cpumask. * 3. Build the RQT. * - * rtnl_lock is required by netif_set_real_num_*_queues in case the + * Locking is required by netif_set_real_num_*_queues in case the * netdev has been registered by this point (if this function was called * in the reload or resume flow). */ - if (take_rtnl) + if (need_lock) { rtnl_lock(); + netdev_lock(priv->netdev); + } err = mlx5e_num_channels_changed(priv); - if (take_rtnl) + if (need_lock) { + netdev_unlock(priv->netdev); rtnl_unlock(); + } if (err) goto out; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 2abab241f03b..63a7a788fb0d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -33,6 +33,7 @@ #include <linux/dim.h> #include <linux/debugfs.h> #include <linux/mlx5/fs.h> +#include <net/netdev_lock.h> #include <net/switchdev.h> #include <net/pkt_cls.h> #include <net/act_api.h> @@ -803,6 +804,7 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = { .ndo_stop = mlx5e_rep_close, .ndo_start_xmit = mlx5e_xmit, .ndo_setup_tc = mlx5e_rep_setup_tc, + .ndo_set_mac_address = eth_mac_addr, .ndo_get_stats64 = mlx5e_rep_get_stats, .ndo_has_offload_stats = mlx5e_rep_has_offload_stats, .ndo_get_offload_stats = mlx5e_rep_get_offload_stats, @@ -885,6 +887,8 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev, { SET_NETDEV_DEV(netdev, mdev->device); netdev->netdev_ops = &mlx5e_netdev_ops_rep; + netdev->request_ops_lock = true; + netdev_lockdep_set_classes(netdev); eth_hw_addr_random(netdev); netdev->ethtool_ops = &mlx5e_rep_ethtool_ops; @@ -1344,9 +1348,11 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv) netdev->wanted_features |= NETIF_F_HW_TC; rtnl_lock(); + netdev_lock(netdev); if (netif_running(netdev)) mlx5e_open(netdev); udp_tunnel_nic_reset_ntf(priv->netdev); + netdev_unlock(netdev); netif_device_attach(netdev); rtnl_unlock(); } @@ -1356,9 +1362,11 @@ static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv) struct mlx5_core_dev *mdev = priv->mdev; rtnl_lock(); + netdev_lock(priv->netdev); if (netif_running(priv->netdev)) mlx5e_close(priv->netdev); netif_device_detach(priv->netdev); + netdev_unlock(priv->netdev); rtnl_unlock(); mlx5e_rep_bridge_cleanup(priv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 5fd70b4d55be..84b1ab8233b8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -1684,17 +1684,17 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi, prog = rcu_dereference(rq->xdp_prog); if (prog) { - struct mlx5e_xdp_buff mxbuf; + struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf; net_prefetchw(va); /* xdp_frame data area */ mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz, - cqe_bcnt, 
&mxbuf); - if (mlx5e_xdp_handle(rq, prog, &mxbuf)) + cqe_bcnt, mxbuf); + if (mlx5e_xdp_handle(rq, prog, mxbuf)) return NULL; /* page/packet was consumed by XDP */ - rx_headroom = mxbuf.xdp.data - mxbuf.xdp.data_hard_start; - metasize = mxbuf.xdp.data - mxbuf.xdp.data_meta; - cqe_bcnt = mxbuf.xdp.data_end - mxbuf.xdp.data; + rx_headroom = mxbuf->xdp.data - mxbuf->xdp.data_hard_start; + metasize = mxbuf->xdp.data - mxbuf->xdp.data_meta; + cqe_bcnt = mxbuf->xdp.data_end - mxbuf->xdp.data; } frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt); skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize); @@ -1713,11 +1713,11 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi struct mlx5_cqe64 *cqe, u32 cqe_bcnt) { struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0]; + struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf; struct mlx5e_wqe_frag_info *head_wi = wi; u16 rx_headroom = rq->buff.headroom; struct mlx5e_frag_page *frag_page; struct skb_shared_info *sinfo; - struct mlx5e_xdp_buff mxbuf; u32 frag_consumed_bytes; struct bpf_prog *prog; struct sk_buff *skb; @@ -1737,8 +1737,8 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi net_prefetch(va + rx_headroom); mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz, - frag_consumed_bytes, &mxbuf); - sinfo = xdp_get_shared_info_from_buff(&mxbuf.xdp); + frag_consumed_bytes, mxbuf); + sinfo = xdp_get_shared_info_from_buff(&mxbuf->xdp); truesize = 0; cqe_bcnt -= frag_consumed_bytes; @@ -1750,8 +1750,9 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt); - mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf.xdp, frag_page, - wi->offset, frag_consumed_bytes); + mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf->xdp, + frag_page, wi->offset, + frag_consumed_bytes); truesize += frag_info->frag_stride; cqe_bcnt -= frag_consumed_bytes; @@ -1760,7 +1761,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi } prog = rcu_dereference(rq->xdp_prog); - if (prog && mlx5e_xdp_handle(rq, prog, &mxbuf)) { + if (prog && mlx5e_xdp_handle(rq, prog, mxbuf)) { if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) { struct mlx5e_wqe_frag_info *pwi; @@ -1770,21 +1771,23 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi return NULL; /* page/packet was consumed by XDP */ } - skb = mlx5e_build_linear_skb(rq, mxbuf.xdp.data_hard_start, rq->buff.frame0_sz, - mxbuf.xdp.data - mxbuf.xdp.data_hard_start, - mxbuf.xdp.data_end - mxbuf.xdp.data, - mxbuf.xdp.data - mxbuf.xdp.data_meta); + skb = mlx5e_build_linear_skb( + rq, mxbuf->xdp.data_hard_start, rq->buff.frame0_sz, + mxbuf->xdp.data - mxbuf->xdp.data_hard_start, + mxbuf->xdp.data_end - mxbuf->xdp.data, + mxbuf->xdp.data - mxbuf->xdp.data_meta); if (unlikely(!skb)) return NULL; skb_mark_for_recycle(skb); head_wi->frag_page->frags++; - if (xdp_buff_has_frags(&mxbuf.xdp)) { + if (xdp_buff_has_frags(&mxbuf->xdp)) { /* sinfo->nr_frags is reset by build_skb, calculate again. 
*/ xdp_update_skb_shared_info(skb, wi - head_wi - 1, sinfo->xdp_frags_size, truesize, - xdp_buff_is_frag_pfmemalloc(&mxbuf.xdp)); + xdp_buff_is_frag_pfmemalloc( + &mxbuf->xdp)); for (struct mlx5e_wqe_frag_info *pwi = head_wi + 1; pwi < wi; pwi++) pwi->frag_page->frags++; @@ -1984,10 +1987,10 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w struct mlx5e_frag_page *frag_page = &wi->alloc_units.frag_pages[page_idx]; u16 headlen = min_t(u16, MLX5E_RX_MAX_HEAD, cqe_bcnt); struct mlx5e_frag_page *head_page = frag_page; + struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf; u32 frag_offset = head_offset; u32 byte_cnt = cqe_bcnt; struct skb_shared_info *sinfo; - struct mlx5e_xdp_buff mxbuf; unsigned int truesize = 0; struct bpf_prog *prog; struct sk_buff *skb; @@ -2033,9 +2036,10 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w } } - mlx5e_fill_mxbuf(rq, cqe, va, linear_hr, linear_frame_sz, linear_data_len, &mxbuf); + mlx5e_fill_mxbuf(rq, cqe, va, linear_hr, linear_frame_sz, + linear_data_len, mxbuf); - sinfo = xdp_get_shared_info_from_buff(&mxbuf.xdp); + sinfo = xdp_get_shared_info_from_buff(&mxbuf->xdp); while (byte_cnt) { /* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */ @@ -2046,7 +2050,8 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w else truesize += ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz)); - mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf.xdp, frag_page, frag_offset, + mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf->xdp, + frag_page, frag_offset, pg_consumed_bytes); byte_cnt -= pg_consumed_bytes; frag_offset = 0; @@ -2054,7 +2059,7 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w } if (prog) { - if (mlx5e_xdp_handle(rq, prog, &mxbuf)) { + if (mlx5e_xdp_handle(rq, prog, mxbuf)) { if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) { struct mlx5e_frag_page *pfp; @@ -2067,10 +2072,10 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w return NULL; /* page/packet was consumed by XDP */ } - skb = mlx5e_build_linear_skb(rq, mxbuf.xdp.data_hard_start, - linear_frame_sz, - mxbuf.xdp.data - mxbuf.xdp.data_hard_start, 0, - mxbuf.xdp.data - mxbuf.xdp.data_meta); + skb = mlx5e_build_linear_skb( + rq, mxbuf->xdp.data_hard_start, linear_frame_sz, + mxbuf->xdp.data - mxbuf->xdp.data_hard_start, 0, + mxbuf->xdp.data - mxbuf->xdp.data_meta); if (unlikely(!skb)) { mlx5e_page_release_fragmented(rq, &wi->linear_page); return NULL; @@ -2080,13 +2085,14 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w wi->linear_page.frags++; mlx5e_page_release_fragmented(rq, &wi->linear_page); - if (xdp_buff_has_frags(&mxbuf.xdp)) { + if (xdp_buff_has_frags(&mxbuf->xdp)) { struct mlx5e_frag_page *pagep; /* sinfo->nr_frags is reset by build_skb, calculate again. 
*/ xdp_update_skb_shared_info(skb, frag_page - head_page, sinfo->xdp_frags_size, truesize, - xdp_buff_is_frag_pfmemalloc(&mxbuf.xdp)); + xdp_buff_is_frag_pfmemalloc( + &mxbuf->xdp)); pagep = head_page; do @@ -2097,12 +2103,13 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w } else { dma_addr_t addr; - if (xdp_buff_has_frags(&mxbuf.xdp)) { + if (xdp_buff_has_frags(&mxbuf->xdp)) { struct mlx5e_frag_page *pagep; xdp_update_skb_shared_info(skb, sinfo->nr_frags, sinfo->xdp_frags_size, truesize, - xdp_buff_is_frag_pfmemalloc(&mxbuf.xdp)); + xdp_buff_is_frag_pfmemalloc( + &mxbuf->xdp)); pagep = frag_page - sinfo->nr_frags; do @@ -2152,20 +2159,20 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, prog = rcu_dereference(rq->xdp_prog); if (prog) { - struct mlx5e_xdp_buff mxbuf; + struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf; net_prefetchw(va); /* xdp_frame data area */ mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz, - cqe_bcnt, &mxbuf); - if (mlx5e_xdp_handle(rq, prog, &mxbuf)) { + cqe_bcnt, mxbuf); + if (mlx5e_xdp_handle(rq, prog, mxbuf)) { if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) frag_page->frags++; return NULL; /* page/packet was consumed by XDP */ } - rx_headroom = mxbuf.xdp.data - mxbuf.xdp.data_hard_start; - metasize = mxbuf.xdp.data - mxbuf.xdp.data_meta; - cqe_bcnt = mxbuf.xdp.data_end - mxbuf.xdp.data; + rx_headroom = mxbuf->xdp.data - mxbuf->xdp.data_hard_start; + metasize = mxbuf->xdp.data - mxbuf->xdp.data_meta; + cqe_bcnt = mxbuf->xdp.data_end - mxbuf->xdp.data; } frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt); skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 1c121b435016..19664fa7f217 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -2424,8 +2424,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ptp) } if (priv->rx_ptp_opened) { for (i = 0; i < NUM_PTP_RQ_STATS; i++) - ethtool_sprintf(data, ptp_rq_stats_desc[i].format, - MLX5E_PTP_CHANNEL_IX); + ethtool_puts(data, ptp_rq_stats_desc[i].format); } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 8de6fcbd3a03..def5dea1463d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -54,7 +54,7 @@ #define MLX5E_DECLARE_PTP_TX_STAT(type, fld) "ptp_tx%d_"#fld, offsetof(type, fld) #define MLX5E_DECLARE_PTP_CH_STAT(type, fld) "ptp_ch_"#fld, offsetof(type, fld) #define MLX5E_DECLARE_PTP_CQ_STAT(type, fld) "ptp_cq%d_"#fld, offsetof(type, fld) -#define MLX5E_DECLARE_PTP_RQ_STAT(type, fld) "ptp_rq%d_"#fld, offsetof(type, fld) +#define MLX5E_DECLARE_PTP_RQ_STAT(type, fld) "ptp_rq0_"#fld, offsetof(type, fld) #define MLX5E_DECLARE_QOS_TX_STAT(type, fld) "qos_tx%d_"#fld, offsetof(type, fld) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 4fd853d19e31..55a8629f0792 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -337,10 +337,11 @@ static void mlx5e_sq_calc_wqe_attr(struct sk_buff *skb, const struct mlx5e_tx_at }; } -static void mlx5e_tx_skb_update_hwts_flags(struct sk_buff *skb) +static void mlx5e_tx_skb_update_ts_flags(struct sk_buff *skb) { if 
(unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + skb_tx_timestamp(skb); } static void mlx5e_tx_check_stop(struct mlx5e_txqsq *sq) @@ -392,7 +393,7 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb, cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | attr->opcode); cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | wqe_attr->ds_cnt); - mlx5e_tx_skb_update_hwts_flags(skb); + mlx5e_tx_skb_update_ts_flags(skb); sq->pc += wi->num_wqebbs; @@ -625,7 +626,7 @@ mlx5e_sq_xmit_mpwqe(struct mlx5e_txqsq *sq, struct sk_buff *skb, mlx5e_dma_push(sq, txd.dma_addr, txd.len, MLX5E_DMA_MAP_SINGLE); mlx5e_skb_fifo_push(&sq->db.skb_fifo, skb); mlx5e_tx_mpwqe_add_dseg(sq, &txd); - mlx5e_tx_skb_update_hwts_flags(skb); + mlx5e_tx_skb_update_ts_flags(skb); if (unlikely(mlx5e_tx_mpwqe_is_full(&sq->mpwqe))) { /* Might stop the queue and affect the retval of __netdev_tx_sent_queue. */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index a47c29571f64..1af76da8b132 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -527,7 +527,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, struct mlx5_flow_rule *dst; void *in_flow_context, *vlan; void *in_match_value; - int reformat_id = 0; + u32 reformat_id = 0; unsigned int inlen; int dst_cnt_size; u32 *in, action; @@ -580,23 +580,21 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, MLX5_SET(flow_context, in_flow_context, action, action); if (!extended_dest && fte->act_dests.action.pkt_reformat) { - struct mlx5_pkt_reformat *pkt_reformat = fte->act_dests.action.pkt_reformat; - - if (pkt_reformat->owner == MLX5_FLOW_RESOURCE_OWNER_SW) { - reformat_id = mlx5_fs_dr_action_get_pkt_reformat_id(pkt_reformat); - if (reformat_id < 0) { - mlx5_core_err(dev, - "Unsupported SW-owned pkt_reformat type (%d) in FW-owned table\n", - pkt_reformat->reformat_type); - err = reformat_id; - goto err_out; - } - } else { - reformat_id = fte->act_dests.action.pkt_reformat->id; + struct mlx5_pkt_reformat *pkt_reformat = + fte->act_dests.action.pkt_reformat; + + err = mlx5_fs_get_packet_reformat_id(pkt_reformat, + &reformat_id); + if (err) { + mlx5_core_err(dev, + "Unsupported pkt_reformat type (%d)\n", + pkt_reformat->reformat_type); + goto err_out; } } - MLX5_SET(flow_context, in_flow_context, packet_reformat_id, (u32)reformat_id); + MLX5_SET(flow_context, in_flow_context, packet_reformat_id, + reformat_id); if (fte->act_dests.action.modify_hdr) { if (fte->act_dests.action.modify_hdr->owner == MLX5_FLOW_RESOURCE_OWNER_SW) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 6163bc98d94a..23a7e8e7adfa 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -1830,14 +1830,35 @@ static int create_auto_flow_group(struct mlx5_flow_table *ft, return err; } +int mlx5_fs_get_packet_reformat_id(struct mlx5_pkt_reformat *pkt_reformat, + u32 *id) +{ + switch (pkt_reformat->owner) { + case MLX5_FLOW_RESOURCE_OWNER_FW: + *id = pkt_reformat->id; + return 0; + case MLX5_FLOW_RESOURCE_OWNER_SW: + return mlx5_fs_dr_action_get_pkt_reformat_id(pkt_reformat, id); + case MLX5_FLOW_RESOURCE_OWNER_HWS: + return mlx5_fs_hws_action_get_pkt_reformat_id(pkt_reformat, id); + default: + return -EINVAL; + } +} + static bool mlx5_pkt_reformat_cmp(struct mlx5_pkt_reformat *p1, struct 
mlx5_pkt_reformat *p2) { - return p1->owner == p2->owner && - (p1->owner == MLX5_FLOW_RESOURCE_OWNER_FW ? - p1->id == p2->id : - mlx5_fs_dr_action_get_pkt_reformat_id(p1) == - mlx5_fs_dr_action_get_pkt_reformat_id(p2)); + int err1, err2; + u32 id1, id2; + + if (p1->owner != p2->owner) + return false; + + err1 = mlx5_fs_get_packet_reformat_id(p1, &id1); + err2 = mlx5_fs_get_packet_reformat_id(p2, &id2); + + return !err1 && !err2 && id1 == id2; } static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index 0767239f651c..500826229b0b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -58,6 +58,7 @@ struct mlx5_flow_definer { enum mlx5_flow_resource_owner { MLX5_FLOW_RESOURCE_OWNER_FW, MLX5_FLOW_RESOURCE_OWNER_SW, + MLX5_FLOW_RESOURCE_OWNER_HWS, }; struct mlx5_modify_hdr { @@ -386,6 +387,9 @@ u32 mlx5_fs_get_capabilities(struct mlx5_core_dev *dev, enum mlx5_flow_namespace struct mlx5_flow_root_namespace *find_root(struct fs_node *node); +int mlx5_fs_get_packet_reformat_id(struct mlx5_pkt_reformat *pkt_reformat, + u32 *id); + #define fs_get_obj(v, _node) {v = container_of((_node), typeof(*v), node); } #define fs_list_for_each_entry(pos, root) \ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index 0979d672d47f..79ae3a51a4b3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -32,6 +32,7 @@ #include <rdma/ib_verbs.h> #include <linux/mlx5/fs.h> +#include <net/netdev_lock.h> #include "en.h" #include "en/params.h" #include "ipoib.h" @@ -102,6 +103,8 @@ int mlx5i_init(struct mlx5_core_dev *mdev, struct net_device *netdev) netdev->netdev_ops = &mlx5i_netdev_ops; netdev->ethtool_ops = &mlx5i_ethtool_ops; + netdev->request_ops_lock = true; + netdev_lockdep_set_classes(netdev); return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c index 65a94e46edcf..cec18efadc73 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c @@ -643,13 +643,6 @@ static int mlx5_extts_configure(struct ptp_clock_info *ptp, int pin = -1; int err = 0; - /* Reject requests with unsupported flags */ - if (rq->extts.flags & ~(PTP_ENABLE_FEATURE | - PTP_RISING_EDGE | - PTP_FALLING_EDGE | - PTP_STRICT_FLAGS)) - return -EOPNOTSUPP; - /* Reject requests to enable time stamping on both edges. 
*/ if ((rq->extts.flags & PTP_STRICT_FLAGS) && (rq->extts.flags & PTP_ENABLE_FEATURE) && @@ -820,12 +813,6 @@ static int perout_conf_npps_real_time(struct mlx5_core_dev *mdev, struct ptp_clo return 0; } -static bool mlx5_perout_verify_flags(struct mlx5_core_dev *mdev, unsigned int flags) -{ - return ((!mlx5_npps_real_time_supported(mdev) && flags) || - (mlx5_npps_real_time_supported(mdev) && flags & ~PTP_PEROUT_DUTY_CYCLE)); -} - static int mlx5_perout_configure(struct ptp_clock_info *ptp, struct ptp_clock_request *rq, int on) @@ -861,12 +848,6 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp, goto unlock; } - /* Reject requests with unsupported flags */ - if (mlx5_perout_verify_flags(mdev, rq->perout.flags)) { - err = -EOPNOTSUPP; - goto unlock; - } - if (on) { pin_mode = MLX5_PIN_MODE_OUT; pattern = MLX5_OUT_PATTERN_PERIODIC; @@ -1034,6 +1015,13 @@ static void mlx5_init_pin_config(struct mlx5_core_dev *mdev) clock->ptp_info.verify = mlx5_ptp_verify; clock->ptp_info.pps = 1; + clock->ptp_info.supported_extts_flags = PTP_RISING_EDGE | + PTP_FALLING_EDGE | + PTP_STRICT_FLAGS; + + if (mlx5_npps_real_time_supported(mdev)) + clock->ptp_info.supported_perout_flags = PTP_PEROUT_DUTY_CYCLE; + for (i = 0; i < clock->ptp_info.n_pins; i++) { snprintf(clock->ptp_info.pin_config[i].name, sizeof(clock->ptp_info.pin_config[i].name), diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c index 2c5f850c31f6..40024cfa3099 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c @@ -148,7 +148,7 @@ out: * Free the IRQ and other resources such as rmap from the system. * BUT doesn't free or remove reference from mlx5. * This function is very important for the shutdown flow, where we need to - * cleanup system resoruces but keep mlx5 objects alive, + * cleanup system resources but keep mlx5 objects alive, * see mlx5_irq_table_free_irqs(). */ static void mlx5_system_free_irq(struct mlx5_irq *irq) @@ -588,7 +588,7 @@ static void irq_pool_free(struct mlx5_irq_pool *pool) struct mlx5_irq *irq; unsigned long index; - /* There are cases in which we are destrying the irq_table before + /* There are cases in which we are destroying the irq_table before * freeing all the IRQs, fast teardown for example. Hence, free the irqs * which might not have been freed. */ @@ -617,7 +617,7 @@ static int irq_pools_init(struct mlx5_core_dev *dev, int sf_vec, int pcif_vec, if (!mlx5_sf_max_functions(dev)) return 0; if (sf_vec < MLX5_IRQ_VEC_COMP_BASE_SF) { - mlx5_core_dbg(dev, "Not enught IRQs for SFs. SF may run at lower performance\n"); + mlx5_core_dbg(dev, "Not enough IRQs for SFs. 
SF may run at lower performance\n"); return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c index b5332c54d4fb..fb62f3bc4bd4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c @@ -72,6 +72,11 @@ enum mlx5hws_action_type mlx5hws_action_get_type(struct mlx5hws_action *action) return action->type; } +struct mlx5_core_dev *mlx5hws_action_get_dev(struct mlx5hws_action *action) +{ + return action->ctx->mdev; +} + static int hws_action_get_shared_stc_nic(struct mlx5hws_context *ctx, enum mlx5hws_context_shared_stc_type stc_type, u8 tbl_type) @@ -238,6 +243,7 @@ hws_action_fixup_stc_attr(struct mlx5hws_context *ctx, enum mlx5hws_table_type table_type, bool is_mirror) { + struct mlx5hws_pool *pool; bool use_fixup = false; u32 fw_tbl_type; u32 base_id; @@ -253,13 +259,11 @@ hws_action_fixup_stc_attr(struct mlx5hws_context *ctx, use_fixup = true; break; } + pool = stc_attr->ste_table.ste_pool; if (!is_mirror) - base_id = mlx5hws_pool_chunk_get_base_id(stc_attr->ste_table.ste_pool, - &stc_attr->ste_table.ste); + base_id = mlx5hws_pool_get_base_id(pool); else - base_id = - mlx5hws_pool_chunk_get_base_mirror_id(stc_attr->ste_table.ste_pool, - &stc_attr->ste_table.ste); + base_id = mlx5hws_pool_get_base_mirror_id(pool); *fixup_stc_attr = *stc_attr; fixup_stc_attr->ste_table.ste_obj_id = base_id; @@ -337,7 +341,7 @@ __must_hold(&ctx->ctrl_lock) if (!mlx5hws_context_cap_dynamic_reparse(ctx)) stc_attr->reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE; - obj_0_id = mlx5hws_pool_chunk_get_base_id(stc_pool, stc); + obj_0_id = mlx5hws_pool_get_base_id(stc_pool); /* According to table/action limitation change the stc_attr */ use_fixup = hws_action_fixup_stc_attr(ctx, stc_attr, &fixup_stc_attr, table_type, false); @@ -353,7 +357,7 @@ __must_hold(&ctx->ctrl_lock) if (table_type == MLX5HWS_TABLE_TYPE_FDB) { u32 obj_1_id; - obj_1_id = mlx5hws_pool_chunk_get_base_mirror_id(stc_pool, stc); + obj_1_id = mlx5hws_pool_get_base_mirror_id(stc_pool); use_fixup = hws_action_fixup_stc_attr(ctx, stc_attr, &fixup_stc_attr, @@ -393,11 +397,11 @@ __must_hold(&ctx->ctrl_lock) stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_DROP; stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT; stc_attr.stc_offset = stc->offset; - obj_id = mlx5hws_pool_chunk_get_base_id(stc_pool, stc); + obj_id = mlx5hws_pool_get_base_id(stc_pool); mlx5hws_cmd_stc_modify(ctx->mdev, obj_id, &stc_attr); if (table_type == MLX5HWS_TABLE_TYPE_FDB) { - obj_id = mlx5hws_pool_chunk_get_base_mirror_id(stc_pool, stc); + obj_id = mlx5hws_pool_get_base_mirror_id(stc_pool); mlx5hws_cmd_stc_modify(ctx->mdev, obj_id, &stc_attr); } @@ -1186,14 +1190,15 @@ hws_action_create_modify_header_hws(struct mlx5hws_action *action, struct mlx5hws_action_mh_pattern *pattern, u32 log_bulk_size) { + u16 num_actions, max_mh_actions = 0, hw_max_actions; struct mlx5hws_context *ctx = action->ctx; - u16 num_actions, max_mh_actions = 0; int i, ret, size_in_bytes; u32 pat_id, arg_id = 0; __be64 *new_pattern; size_t pat_max_sz; pat_max_sz = MLX5HWS_ARG_CHUNK_SIZE_MAX * MLX5HWS_ARG_DATA_SIZE; + hw_max_actions = pat_max_sz / MLX5HWS_MODIFY_ACTION_SIZE; size_in_bytes = pat_max_sz * sizeof(__be64); new_pattern = kcalloc(num_of_patterns, size_in_bytes, GFP_KERNEL); if (!new_pattern) @@ -1203,16 +1208,20 @@ hws_action_create_modify_header_hws(struct mlx5hws_action *action, for (i = 0; i < num_of_patterns; i++) { size_t 
new_num_actions; size_t cur_num_actions; - u32 nope_location; + u32 nop_locations; cur_num_actions = pattern[i].sz / MLX5HWS_MODIFY_ACTION_SIZE; - mlx5hws_pat_calc_nope(pattern[i].data, cur_num_actions, - pat_max_sz / MLX5HWS_MODIFY_ACTION_SIZE, - &new_num_actions, &nope_location, - &new_pattern[i * pat_max_sz]); + ret = mlx5hws_pat_calc_nop(pattern[i].data, cur_num_actions, + hw_max_actions, &new_num_actions, + &nop_locations, + &new_pattern[i * pat_max_sz]); + if (ret) { + mlx5hws_err(ctx, "Too many actions after nop insertion\n"); + goto free_new_pat; + } - action[i].modify_header.nope_locations = nope_location; + action[i].modify_header.nop_locations = nop_locations; action[i].modify_header.num_of_actions = new_num_actions; max_mh_actions = max(max_mh_actions, new_num_actions); @@ -1259,7 +1268,7 @@ hws_action_create_modify_header_hws(struct mlx5hws_action *action, MLX5_GET(set_action_in, pattern[i].data, action_type); } else { /* Multiple modify actions require a pattern */ - if (unlikely(action[i].modify_header.nope_locations)) { + if (unlikely(action[i].modify_header.nop_locations)) { size_t pattern_sz; pattern_sz = action[i].modify_header.num_of_actions * @@ -1575,17 +1584,15 @@ hws_action_create_dest_match_range_definer(struct mlx5hws_context *ctx) return definer; } -static struct mlx5hws_matcher_action_ste * +static struct mlx5hws_range_action_table * hws_action_create_dest_match_range_table(struct mlx5hws_context *ctx, struct mlx5hws_definer *definer, u32 miss_ft_id) { struct mlx5hws_cmd_rtc_create_attr rtc_attr = {0}; - struct mlx5hws_action_default_stc *default_stc; - struct mlx5hws_matcher_action_ste *table_ste; + struct mlx5hws_range_action_table *table_ste; struct mlx5hws_pool_attr pool_attr = {0}; struct mlx5hws_pool *ste_pool, *stc_pool; - struct mlx5hws_pool_chunk *ste; u32 *rtc_0_id, *rtc_1_id; u32 obj_id; int ret; @@ -1604,7 +1611,6 @@ hws_action_create_dest_match_range_table(struct mlx5hws_context *ctx, pool_attr.table_type = MLX5HWS_TABLE_TYPE_FDB; pool_attr.pool_type = MLX5HWS_POOL_TYPE_STE; - pool_attr.flags = MLX5HWS_POOL_FLAGS_FOR_STE_ACTION_POOL; pool_attr.alloc_log_sz = 1; table_ste->pool = mlx5hws_pool_create(ctx, &pool_attr); if (!table_ste->pool) { @@ -1616,8 +1622,6 @@ hws_action_create_dest_match_range_table(struct mlx5hws_context *ctx, rtc_0_id = &table_ste->rtc_0_id; rtc_1_id = &table_ste->rtc_1_id; ste_pool = table_ste->pool; - ste = &table_ste->ste; - ste->order = 1; rtc_attr.log_size = 0; rtc_attr.log_depth = 0; @@ -1629,18 +1633,16 @@ hws_action_create_dest_match_range_table(struct mlx5hws_context *ctx, rtc_attr.fw_gen_wqe = true; rtc_attr.is_scnd_range = true; - obj_id = mlx5hws_pool_chunk_get_base_id(ste_pool, ste); + obj_id = mlx5hws_pool_get_base_id(ste_pool); rtc_attr.pd = ctx->pd_num; rtc_attr.ste_base = obj_id; - rtc_attr.ste_offset = ste->offset; rtc_attr.reparse_mode = mlx5hws_context_get_reparse_mode(ctx); rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(MLX5HWS_TABLE_TYPE_FDB, false); /* STC is a single resource (obj_id), use any STC for the ID */ stc_pool = ctx->stc_pool; - default_stc = ctx->common_res.default_stc; - obj_id = mlx5hws_pool_chunk_get_base_id(stc_pool, &default_stc->default_hit); + obj_id = mlx5hws_pool_get_base_id(stc_pool); rtc_attr.stc_base = obj_id; ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_0_id); @@ -1650,11 +1652,11 @@ hws_action_create_dest_match_range_table(struct mlx5hws_context *ctx, } /* Create mirror RTC */ - obj_id = mlx5hws_pool_chunk_get_base_mirror_id(ste_pool, ste); + obj_id = 
mlx5hws_pool_get_base_mirror_id(ste_pool); rtc_attr.ste_base = obj_id; rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(MLX5HWS_TABLE_TYPE_FDB, true); - obj_id = mlx5hws_pool_chunk_get_base_mirror_id(stc_pool, &default_stc->default_hit); + obj_id = mlx5hws_pool_get_base_mirror_id(stc_pool); rtc_attr.stc_base = obj_id; ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_1_id); @@ -1677,9 +1679,9 @@ free_ste: return NULL; } -static void -hws_action_destroy_dest_match_range_table(struct mlx5hws_context *ctx, - struct mlx5hws_matcher_action_ste *table_ste) +static void hws_action_destroy_dest_match_range_table( + struct mlx5hws_context *ctx, + struct mlx5hws_range_action_table *table_ste) { mutex_lock(&ctx->ctrl_lock); @@ -1691,12 +1693,11 @@ hws_action_destroy_dest_match_range_table(struct mlx5hws_context *ctx, mutex_unlock(&ctx->ctrl_lock); } -static int -hws_action_create_dest_match_range_fill_table(struct mlx5hws_context *ctx, - struct mlx5hws_matcher_action_ste *table_ste, - struct mlx5hws_action *hit_ft_action, - struct mlx5hws_definer *range_definer, - u32 min, u32 max) +static int hws_action_create_dest_match_range_fill_table( + struct mlx5hws_context *ctx, + struct mlx5hws_range_action_table *table_ste, + struct mlx5hws_action *hit_ft_action, + struct mlx5hws_definer *range_definer, u32 min, u32 max) { struct mlx5hws_wqe_gta_data_seg_ste match_wqe_data = {0}; struct mlx5hws_wqe_gta_data_seg_ste range_wqe_data = {0}; @@ -1792,7 +1793,7 @@ mlx5hws_action_create_dest_match_range(struct mlx5hws_context *ctx, u32 min, u32 max, u32 flags) { struct mlx5hws_cmd_stc_modify_attr stc_attr = {0}; - struct mlx5hws_matcher_action_ste *table_ste; + struct mlx5hws_range_action_table *table_ste; struct mlx5hws_action *hit_ft_action; struct mlx5hws_definer *definer; struct mlx5hws_action *action; @@ -1837,7 +1838,6 @@ mlx5hws_action_create_dest_match_range(struct mlx5hws_context *ctx, stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT; stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE; stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE; - stc_attr.ste_table.ste = table_ste->ste; stc_attr.ste_table.ste_pool = table_ste->pool; stc_attr.ste_table.match_definer_id = ctx->caps->trivial_match_definer; @@ -2110,21 +2110,23 @@ static void hws_action_modify_write(struct mlx5hws_send_engine *queue, u32 arg_idx, u8 *arg_data, u16 num_of_actions, - u32 nope_locations) + u32 nop_locations) { u8 *new_arg_data = NULL; int i, j; - if (unlikely(nope_locations)) { + if (unlikely(nop_locations)) { new_arg_data = kcalloc(num_of_actions, MLX5HWS_MODIFY_ACTION_SIZE, GFP_KERNEL); if (unlikely(!new_arg_data)) return; - for (i = 0, j = 0; i < num_of_actions; i++, j++) { - memcpy(&new_arg_data[j], arg_data, MLX5HWS_MODIFY_ACTION_SIZE); - if (BIT(i) & nope_locations) + for (i = 0, j = 0; j < num_of_actions; i++, j++) { + if (BIT(i) & nop_locations) j++; + memcpy(&new_arg_data[j * MLX5HWS_MODIFY_ACTION_SIZE], + &arg_data[i * MLX5HWS_MODIFY_ACTION_SIZE], + MLX5HWS_MODIFY_ACTION_SIZE); } } @@ -2220,6 +2222,7 @@ hws_action_setter_modify_header(struct mlx5hws_actions_apply_data *apply, struct mlx5hws_action *action; u32 arg_sz, arg_idx; u8 *single_action; + u8 max_actions; __be32 stc_idx; rule_action = &apply->rule_action[setter->idx_double]; @@ -2247,21 +2250,23 @@ hws_action_setter_modify_header(struct mlx5hws_actions_apply_data *apply, apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = *(__be32 *)MLX5_ADDR_OF(set_action_in, single_action, data); - } else { - /* Argument offset multiple with number of args per 
these actions */ - arg_sz = mlx5hws_arg_get_arg_size(action->modify_header.max_num_of_actions); - arg_idx = rule_action->modify_header.offset * arg_sz; - - apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(arg_idx); - - if (!(action->flags & MLX5HWS_ACTION_FLAG_SHARED)) { - apply->require_dep = 1; - hws_action_modify_write(apply->queue, - action->modify_header.arg_id + arg_idx, - rule_action->modify_header.data, - action->modify_header.num_of_actions, - action->modify_header.nope_locations); - } + return; + } + + /* Argument offset multiple with number of args per these actions */ + max_actions = action->modify_header.max_num_of_actions; + arg_sz = mlx5hws_arg_get_arg_size(max_actions); + arg_idx = rule_action->modify_header.offset * arg_sz; + + apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(arg_idx); + + if (!(action->flags & MLX5HWS_ACTION_FLAG_SHARED)) { + apply->require_dep = 1; + hws_action_modify_write(apply->queue, + action->modify_header.arg_id + arg_idx, + rule_action->modify_header.data, + action->modify_header.num_of_actions, + action->modify_header.nop_locations); } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h index 64b76075f7f8..55a079fdd08f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h @@ -118,6 +118,12 @@ struct mlx5hws_action_template { u8 only_term; }; +struct mlx5hws_range_action_table { + struct mlx5hws_pool *pool; + u32 rtc_0_id; + u32 rtc_1_id; +}; + struct mlx5hws_action { u8 type; u8 flags; @@ -130,7 +136,7 @@ struct mlx5hws_action { u32 pat_id; u32 arg_id; __be64 single_action; - u32 nope_locations; + u32 nop_locations; u8 num_of_patterns; u8 single_action_type; u8 num_of_actions; @@ -186,7 +192,7 @@ struct mlx5hws_action { size_t size; } remove_header; struct { - struct mlx5hws_matcher_action_ste *table_ste; + struct mlx5hws_range_action_table *table_ste; struct mlx5hws_action *hit_ft_action; struct mlx5hws_definer *definer; } range; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action_ste_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action_ste_pool.c new file mode 100644 index 000000000000..5766a9c82f96 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action_ste_pool.c @@ -0,0 +1,467 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2025 NVIDIA Corporation & Affiliates */ + +#include "internal.h" + +static const char * +hws_pool_opt_to_str(enum mlx5hws_pool_optimize opt) +{ + switch (opt) { + case MLX5HWS_POOL_OPTIMIZE_NONE: + return "rx-and-tx"; + case MLX5HWS_POOL_OPTIMIZE_ORIG: + return "rx-only"; + case MLX5HWS_POOL_OPTIMIZE_MIRROR: + return "tx-only"; + default: + return "unknown"; + } +} + +static int +hws_action_ste_table_create_pool(struct mlx5hws_context *ctx, + struct mlx5hws_action_ste_table *action_tbl, + enum mlx5hws_pool_optimize opt, size_t log_sz) +{ + struct mlx5hws_pool_attr pool_attr = { 0 }; + + pool_attr.pool_type = MLX5HWS_POOL_TYPE_STE; + pool_attr.table_type = MLX5HWS_TABLE_TYPE_FDB; + pool_attr.flags = MLX5HWS_POOL_FLAG_BUDDY; + pool_attr.opt_type = opt; + pool_attr.alloc_log_sz = log_sz; + + action_tbl->pool = mlx5hws_pool_create(ctx, &pool_attr); + if (!action_tbl->pool) { + mlx5hws_err(ctx, "Failed to allocate STE pool\n"); + return -EINVAL; + } + + return 0; +} + +static int hws_action_ste_table_create_single_rtc( + struct mlx5hws_context *ctx, + struct 
mlx5hws_action_ste_table *action_tbl, + enum mlx5hws_pool_optimize opt, size_t log_sz, bool tx) +{ + struct mlx5hws_cmd_rtc_create_attr rtc_attr = { 0 }; + u32 *rtc_id; + + rtc_attr.log_depth = 0; + rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET; + /* Action STEs use the default always hit definer. */ + rtc_attr.match_definer_0 = ctx->caps->trivial_match_definer; + rtc_attr.is_frst_jumbo = false; + rtc_attr.miss_ft_id = 0; + rtc_attr.pd = ctx->pd_num; + rtc_attr.reparse_mode = mlx5hws_context_get_reparse_mode(ctx); + + if (tx) { + rtc_attr.table_type = FS_FT_FDB_TX; + rtc_attr.ste_base = + mlx5hws_pool_get_base_mirror_id(action_tbl->pool); + rtc_attr.stc_base = + mlx5hws_pool_get_base_mirror_id(ctx->stc_pool); + rtc_attr.log_size = + opt == MLX5HWS_POOL_OPTIMIZE_ORIG ? 0 : log_sz; + rtc_id = &action_tbl->rtc_1_id; + } else { + rtc_attr.table_type = FS_FT_FDB_RX; + rtc_attr.ste_base = mlx5hws_pool_get_base_id(action_tbl->pool); + rtc_attr.stc_base = mlx5hws_pool_get_base_id(ctx->stc_pool); + rtc_attr.log_size = + opt == MLX5HWS_POOL_OPTIMIZE_MIRROR ? 0 : log_sz; + rtc_id = &action_tbl->rtc_0_id; + } + + return mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_id); +} + +static int +hws_action_ste_table_create_rtcs(struct mlx5hws_context *ctx, + struct mlx5hws_action_ste_table *action_tbl, + enum mlx5hws_pool_optimize opt, size_t log_sz) +{ + int err; + + err = hws_action_ste_table_create_single_rtc(ctx, action_tbl, opt, + log_sz, false); + if (err) + return err; + + err = hws_action_ste_table_create_single_rtc(ctx, action_tbl, opt, + log_sz, true); + if (err) { + mlx5hws_cmd_rtc_destroy(ctx->mdev, action_tbl->rtc_0_id); + return err; + } + + return 0; +} + +static void +hws_action_ste_table_destroy_rtcs(struct mlx5hws_action_ste_table *action_tbl) +{ + mlx5hws_cmd_rtc_destroy(action_tbl->pool->ctx->mdev, + action_tbl->rtc_1_id); + mlx5hws_cmd_rtc_destroy(action_tbl->pool->ctx->mdev, + action_tbl->rtc_0_id); +} + +static int +hws_action_ste_table_create_stc(struct mlx5hws_context *ctx, + struct mlx5hws_action_ste_table *action_tbl) +{ + struct mlx5hws_cmd_stc_modify_attr stc_attr = { 0 }; + + stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT; + stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE; + stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE; + stc_attr.ste_table.ste_pool = action_tbl->pool; + stc_attr.ste_table.match_definer_id = ctx->caps->trivial_match_definer; + + return mlx5hws_action_alloc_single_stc(ctx, &stc_attr, + MLX5HWS_TABLE_TYPE_FDB, + &action_tbl->stc); +} + +static struct mlx5hws_action_ste_table * +hws_action_ste_table_alloc(struct mlx5hws_action_ste_pool_element *parent_elem) +{ + enum mlx5hws_pool_optimize opt = parent_elem->opt; + struct mlx5hws_context *ctx = parent_elem->ctx; + struct mlx5hws_action_ste_table *action_tbl; + size_t log_sz; + int err; + + log_sz = min(parent_elem->log_sz ? 
+ parent_elem->log_sz + + MLX5HWS_ACTION_STE_TABLE_STEP_LOG_SZ : + MLX5HWS_ACTION_STE_TABLE_INIT_LOG_SZ, + MLX5HWS_ACTION_STE_TABLE_MAX_LOG_SZ); + + action_tbl = kzalloc(sizeof(*action_tbl), GFP_KERNEL); + if (!action_tbl) + return ERR_PTR(-ENOMEM); + + err = hws_action_ste_table_create_pool(ctx, action_tbl, opt, log_sz); + if (err) + goto free_tbl; + + err = hws_action_ste_table_create_rtcs(ctx, action_tbl, opt, log_sz); + if (err) + goto destroy_pool; + + err = hws_action_ste_table_create_stc(ctx, action_tbl); + if (err) + goto destroy_rtcs; + + action_tbl->parent_elem = parent_elem; + INIT_LIST_HEAD(&action_tbl->list_node); + action_tbl->last_used = jiffies; + list_add(&action_tbl->list_node, &parent_elem->available); + parent_elem->log_sz = log_sz; + + mlx5hws_dbg(ctx, + "Allocated %s action STE table log_sz %zu; STEs (%d, %d); RTCs (%d, %d); STC %d\n", + hws_pool_opt_to_str(opt), log_sz, + mlx5hws_pool_get_base_id(action_tbl->pool), + mlx5hws_pool_get_base_mirror_id(action_tbl->pool), + action_tbl->rtc_0_id, action_tbl->rtc_1_id, + action_tbl->stc.offset); + + return action_tbl; + +destroy_rtcs: + hws_action_ste_table_destroy_rtcs(action_tbl); +destroy_pool: + mlx5hws_pool_destroy(action_tbl->pool); +free_tbl: + kfree(action_tbl); + + return ERR_PTR(err); +} + +static void +hws_action_ste_table_destroy(struct mlx5hws_action_ste_table *action_tbl) +{ + struct mlx5hws_context *ctx = action_tbl->parent_elem->ctx; + + mlx5hws_dbg(ctx, + "Destroying %s action STE table: STEs (%d, %d); RTCs (%d, %d); STC %d\n", + hws_pool_opt_to_str(action_tbl->parent_elem->opt), + mlx5hws_pool_get_base_id(action_tbl->pool), + mlx5hws_pool_get_base_mirror_id(action_tbl->pool), + action_tbl->rtc_0_id, action_tbl->rtc_1_id, + action_tbl->stc.offset); + + mlx5hws_action_free_single_stc(ctx, MLX5HWS_TABLE_TYPE_FDB, + &action_tbl->stc); + hws_action_ste_table_destroy_rtcs(action_tbl); + mlx5hws_pool_destroy(action_tbl->pool); + + list_del(&action_tbl->list_node); + kfree(action_tbl); +} + +static int +hws_action_ste_pool_element_init(struct mlx5hws_context *ctx, + struct mlx5hws_action_ste_pool_element *elem, + enum mlx5hws_pool_optimize opt) +{ + elem->ctx = ctx; + elem->opt = opt; + INIT_LIST_HEAD(&elem->available); + INIT_LIST_HEAD(&elem->full); + + return 0; +} + +static void hws_action_ste_pool_element_destroy( + struct mlx5hws_action_ste_pool_element *elem) +{ + struct mlx5hws_action_ste_table *action_tbl, *p; + + /* This should be empty, but attempt to free its elements anyway. */ + list_for_each_entry_safe(action_tbl, p, &elem->full, list_node) + hws_action_ste_table_destroy(action_tbl); + + list_for_each_entry_safe(action_tbl, p, &elem->available, list_node) + hws_action_ste_table_destroy(action_tbl); +} + +static int hws_action_ste_pool_init(struct mlx5hws_context *ctx, + struct mlx5hws_action_ste_pool *pool) +{ + enum mlx5hws_pool_optimize opt; + int err; + + mutex_init(&pool->lock); + + /* Rules which are added for both RX and TX must use the same action STE + * indices for both. If we were to use a single table, then RX-only and + * TX-only rules would waste the unused entries. Thus, we use separate + * table sets for the three cases. 
+ */ + for (opt = MLX5HWS_POOL_OPTIMIZE_NONE; opt < MLX5HWS_POOL_OPTIMIZE_MAX; + opt++) { + err = hws_action_ste_pool_element_init(ctx, &pool->elems[opt], + opt); + if (err) + goto destroy_elems; + pool->elems[opt].parent_pool = pool; + } + + return 0; + +destroy_elems: + while (opt-- > MLX5HWS_POOL_OPTIMIZE_NONE) + hws_action_ste_pool_element_destroy(&pool->elems[opt]); + + return err; +} + +static void hws_action_ste_pool_destroy(struct mlx5hws_action_ste_pool *pool) +{ + int opt; + + for (opt = MLX5HWS_POOL_OPTIMIZE_MAX - 1; + opt >= MLX5HWS_POOL_OPTIMIZE_NONE; opt--) + hws_action_ste_pool_element_destroy(&pool->elems[opt]); +} + +static void hws_action_ste_pool_element_collect_stale( + struct mlx5hws_action_ste_pool_element *elem, struct list_head *cleanup) +{ + struct mlx5hws_action_ste_table *action_tbl, *p; + unsigned long expire_time, now; + + expire_time = secs_to_jiffies(MLX5HWS_ACTION_STE_POOL_EXPIRE_SECONDS); + now = jiffies; + + list_for_each_entry_safe(action_tbl, p, &elem->available, list_node) { + if (mlx5hws_pool_full(action_tbl->pool) && + time_before(action_tbl->last_used + expire_time, now)) + list_move(&action_tbl->list_node, cleanup); + } +} + +static void hws_action_ste_table_cleanup_list(struct list_head *cleanup) +{ + struct mlx5hws_action_ste_table *action_tbl, *p; + + list_for_each_entry_safe(action_tbl, p, cleanup, list_node) + hws_action_ste_table_destroy(action_tbl); +} + +static void hws_action_ste_pool_cleanup(struct work_struct *work) +{ + enum mlx5hws_pool_optimize opt; + struct mlx5hws_context *ctx; + LIST_HEAD(cleanup); + int i; + + ctx = container_of(work, struct mlx5hws_context, + action_ste_cleanup.work); + + for (i = 0; i < ctx->queues; i++) { + struct mlx5hws_action_ste_pool *p = &ctx->action_ste_pool[i]; + + mutex_lock(&p->lock); + for (opt = MLX5HWS_POOL_OPTIMIZE_NONE; + opt < MLX5HWS_POOL_OPTIMIZE_MAX; opt++) + hws_action_ste_pool_element_collect_stale( + &p->elems[opt], &cleanup); + mutex_unlock(&p->lock); + } + + hws_action_ste_table_cleanup_list(&cleanup); + + schedule_delayed_work(&ctx->action_ste_cleanup, + secs_to_jiffies( + MLX5HWS_ACTION_STE_POOL_CLEANUP_SECONDS)); +} + +int mlx5hws_action_ste_pool_init(struct mlx5hws_context *ctx) +{ + struct mlx5hws_action_ste_pool *pool; + size_t queues = ctx->queues; + int i, err; + + pool = kcalloc(queues, sizeof(*pool), GFP_KERNEL); + if (!pool) + return -ENOMEM; + + for (i = 0; i < queues; i++) { + err = hws_action_ste_pool_init(ctx, &pool[i]); + if (err) + goto free_pool; + } + + ctx->action_ste_pool = pool; + + INIT_DELAYED_WORK(&ctx->action_ste_cleanup, + hws_action_ste_pool_cleanup); + schedule_delayed_work( + &ctx->action_ste_cleanup, + secs_to_jiffies(MLX5HWS_ACTION_STE_POOL_CLEANUP_SECONDS)); + + return 0; + +free_pool: + while (i--) + hws_action_ste_pool_destroy(&pool[i]); + kfree(pool); + + return err; +} + +void mlx5hws_action_ste_pool_uninit(struct mlx5hws_context *ctx) +{ + size_t queues = ctx->queues; + int i; + + cancel_delayed_work_sync(&ctx->action_ste_cleanup); + + for (i = 0; i < queues; i++) + hws_action_ste_pool_destroy(&ctx->action_ste_pool[i]); + + kfree(ctx->action_ste_pool); +} + +static struct mlx5hws_action_ste_pool_element * +hws_action_ste_choose_elem(struct mlx5hws_action_ste_pool *pool, + bool skip_rx, bool skip_tx) +{ + if (skip_rx) + return &pool->elems[MLX5HWS_POOL_OPTIMIZE_MIRROR]; + + if (skip_tx) + return &pool->elems[MLX5HWS_POOL_OPTIMIZE_ORIG]; + + return &pool->elems[MLX5HWS_POOL_OPTIMIZE_NONE]; +} + +static int +hws_action_ste_table_chunk_alloc(struct 
mlx5hws_action_ste_table *action_tbl, + struct mlx5hws_action_ste_chunk *chunk) +{ + int err; + + err = mlx5hws_pool_chunk_alloc(action_tbl->pool, &chunk->ste); + if (err) + return err; + + chunk->action_tbl = action_tbl; + action_tbl->last_used = jiffies; + + return 0; +} + +int mlx5hws_action_ste_chunk_alloc(struct mlx5hws_action_ste_pool *pool, + bool skip_rx, bool skip_tx, + struct mlx5hws_action_ste_chunk *chunk) +{ + struct mlx5hws_action_ste_pool_element *elem; + struct mlx5hws_action_ste_table *action_tbl; + bool found; + int err; + + if (skip_rx && skip_tx) + return -EINVAL; + + mutex_lock(&pool->lock); + + elem = hws_action_ste_choose_elem(pool, skip_rx, skip_tx); + + mlx5hws_dbg(elem->ctx, + "Allocating action STEs skip_rx %d skip_tx %d order %d\n", + skip_rx, skip_tx, chunk->ste.order); + + found = false; + list_for_each_entry(action_tbl, &elem->available, list_node) { + if (!hws_action_ste_table_chunk_alloc(action_tbl, chunk)) { + found = true; + break; + } + } + + if (!found) { + action_tbl = hws_action_ste_table_alloc(elem); + if (IS_ERR(action_tbl)) { + err = PTR_ERR(action_tbl); + goto out; + } + + err = hws_action_ste_table_chunk_alloc(action_tbl, chunk); + if (err) + goto out; + } + + if (mlx5hws_pool_empty(action_tbl->pool)) + list_move(&action_tbl->list_node, &elem->full); + + err = 0; + +out: + mutex_unlock(&pool->lock); + + return err; +} + +void mlx5hws_action_ste_chunk_free(struct mlx5hws_action_ste_chunk *chunk) +{ + struct mutex *lock = &chunk->action_tbl->parent_elem->parent_pool->lock; + + mlx5hws_dbg(chunk->action_tbl->pool->ctx, + "Freeing action STEs offset %d order %d\n", + chunk->ste.offset, chunk->ste.order); + + mutex_lock(lock); + mlx5hws_pool_chunk_free(chunk->action_tbl->pool, &chunk->ste); + chunk->action_tbl->last_used = jiffies; + list_move(&chunk->action_tbl->list_node, + &chunk->action_tbl->parent_elem->available); + mutex_unlock(lock); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action_ste_pool.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action_ste_pool.h new file mode 100644 index 000000000000..a8ba97359e31 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action_ste_pool.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2025 NVIDIA Corporation & Affiliates */ + +#ifndef ACTION_STE_POOL_H_ +#define ACTION_STE_POOL_H_ + +#define MLX5HWS_ACTION_STE_TABLE_INIT_LOG_SZ 10 +#define MLX5HWS_ACTION_STE_TABLE_STEP_LOG_SZ 1 +#define MLX5HWS_ACTION_STE_TABLE_MAX_LOG_SZ 20 + +#define MLX5HWS_ACTION_STE_POOL_CLEANUP_SECONDS 300 +#define MLX5HWS_ACTION_STE_POOL_EXPIRE_SECONDS 300 + +struct mlx5hws_action_ste_pool_element; + +struct mlx5hws_action_ste_table { + struct mlx5hws_action_ste_pool_element *parent_elem; + /* Wraps the RTC and STE range for this given action. */ + struct mlx5hws_pool *pool; + /* Match STEs use this STC to jump to this pool's RTC. */ + struct mlx5hws_pool_chunk stc; + u32 rtc_0_id; + u32 rtc_1_id; + struct list_head list_node; + unsigned long last_used; +}; + +struct mlx5hws_action_ste_pool_element { + struct mlx5hws_context *ctx; + struct mlx5hws_action_ste_pool *parent_pool; + size_t log_sz; /* Size of the largest table so far. */ + enum mlx5hws_pool_optimize opt; + struct list_head available; + struct list_head full; +}; + +/* Central repository of action STEs. The context contains one of these pools + * per queue. + */ +struct mlx5hws_action_ste_pool { + /* Protects the entire pool. 
We have one pool per queue and only one + * operation can be active per rule at a given time. Thus this lock + * protects solely against concurrent garbage collection and we expect + * very little contention. + */ + struct mutex lock; + struct mlx5hws_action_ste_pool_element elems[MLX5HWS_POOL_OPTIMIZE_MAX]; +}; + +/* A chunk of STEs and the table it was allocated from. Used by rules. */ +struct mlx5hws_action_ste_chunk { + struct mlx5hws_action_ste_table *action_tbl; + struct mlx5hws_pool_chunk ste; +}; + +int mlx5hws_action_ste_pool_init(struct mlx5hws_context *ctx); + +void mlx5hws_action_ste_pool_uninit(struct mlx5hws_context *ctx); + +/* Callers are expected to fill chunk->ste.order. On success, this function + * populates chunk->tbl and chunk->ste.offset. + */ +int mlx5hws_action_ste_chunk_alloc(struct mlx5hws_action_ste_pool *pool, + bool skip_rx, bool skip_tx, + struct mlx5hws_action_ste_chunk *chunk); + +void mlx5hws_action_ste_chunk_free(struct mlx5hws_action_ste_chunk *chunk); + +#endif /* ACTION_STE_POOL_H_ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c index 19dce1ba512d..9e057f808ea5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c @@ -46,10 +46,14 @@ static void hws_bwc_unlock_all_queues(struct mlx5hws_context *ctx) } } -static void hws_bwc_matcher_init_attr(struct mlx5hws_matcher_attr *attr, +static void hws_bwc_matcher_init_attr(struct mlx5hws_bwc_matcher *bwc_matcher, u32 priority, - u8 size_log) + u8 size_log, + struct mlx5hws_matcher_attr *attr) { + struct mlx5hws_bwc_matcher *first_matcher = + bwc_matcher->complex_first_bwc_matcher; + memset(attr, 0, sizeof(*attr)); attr->priority = priority; @@ -61,6 +65,9 @@ static void hws_bwc_matcher_init_attr(struct mlx5hws_matcher_attr *attr, attr->rule.num_log = size_log; attr->resizable = true; attr->max_num_of_at_attach = MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM; + + attr->isolated_matcher_end_ft_id = + first_matcher ? first_matcher->matcher->end_ft_id : 0; } int mlx5hws_bwc_matcher_create_simple(struct mlx5hws_bwc_matcher *bwc_matcher, @@ -83,20 +90,27 @@ int mlx5hws_bwc_matcher_create_simple(struct mlx5hws_bwc_matcher *bwc_matcher, for (i = 0; i < bwc_queues; i++) INIT_LIST_HEAD(&bwc_matcher->rules[i]); - hws_bwc_matcher_init_attr(&attr, + hws_bwc_matcher_init_attr(bwc_matcher, priority, - MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG); + MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG, + &attr); bwc_matcher->priority = priority; bwc_matcher->size_log = MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG; + bwc_matcher->size_of_at_array = MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM; + bwc_matcher->at = kcalloc(bwc_matcher->size_of_at_array, + sizeof(*bwc_matcher->at), GFP_KERNEL); + if (!bwc_matcher->at) + goto free_bwc_matcher_rules; + /* create dummy action template */ bwc_matcher->at[0] = mlx5hws_action_template_create(action_types ? 
action_types : init_action_types); if (!bwc_matcher->at[0]) { mlx5hws_err(table->ctx, "BWC matcher: failed creating action template\n"); - goto free_bwc_matcher_rules; + goto free_bwc_matcher_at_array; } bwc_matcher->num_of_at = 1; @@ -126,6 +140,8 @@ free_mt: mlx5hws_match_template_destroy(bwc_matcher->mt); free_at: mlx5hws_action_template_destroy(bwc_matcher->at[0]); +free_bwc_matcher_at_array: + kfree(bwc_matcher->at); free_bwc_matcher_rules: kfree(bwc_matcher->rules); err: @@ -153,6 +169,7 @@ mlx5hws_bwc_matcher_create(struct mlx5hws_table *table, return NULL; atomic_set(&bwc_matcher->num_of_rules, 0); + atomic_set(&bwc_matcher->rehash_required, false); /* Check if the required match params can be all matched * in single STE, otherwise complex matcher is needed. @@ -192,6 +209,7 @@ int mlx5hws_bwc_matcher_destroy_simple(struct mlx5hws_bwc_matcher *bwc_matcher) for (i = 0; i < bwc_matcher->num_of_at; i++) mlx5hws_action_template_destroy(bwc_matcher->at[i]); + kfree(bwc_matcher->at); mlx5hws_match_template_destroy(bwc_matcher->mt); kfree(bwc_matcher->rules); @@ -208,16 +226,19 @@ int mlx5hws_bwc_matcher_destroy(struct mlx5hws_bwc_matcher *bwc_matcher) "BWC matcher destroy: matcher still has %d rules\n", num_of_rules); - mlx5hws_bwc_matcher_destroy_simple(bwc_matcher); + if (bwc_matcher->complex) + mlx5hws_bwc_matcher_destroy_complex(bwc_matcher); + else + mlx5hws_bwc_matcher_destroy_simple(bwc_matcher); kfree(bwc_matcher); return 0; } -static int hws_bwc_queue_poll(struct mlx5hws_context *ctx, - u16 queue_id, - u32 *pending_rules, - bool drain) +int mlx5hws_bwc_queue_poll(struct mlx5hws_context *ctx, + u16 queue_id, + u32 *pending_rules, + bool drain) { unsigned long timeout = jiffies + secs_to_jiffies(MLX5HWS_BWC_POLLING_TIMEOUT); @@ -320,16 +341,12 @@ static void hws_bwc_rule_list_add(struct mlx5hws_bwc_rule *bwc_rule, u16 idx) { struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher; - atomic_inc(&bwc_matcher->num_of_rules); bwc_rule->bwc_queue_idx = idx; list_add(&bwc_rule->list_node, &bwc_matcher->rules[idx]); } static void hws_bwc_rule_list_remove(struct mlx5hws_bwc_rule *bwc_rule) { - struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher; - - atomic_dec(&bwc_matcher->num_of_rules); list_del_init(&bwc_rule->list_node); } @@ -352,7 +369,8 @@ hws_bwc_rule_destroy_hws_sync(struct mlx5hws_bwc_rule *bwc_rule, if (unlikely(ret)) return ret; - ret = hws_bwc_queue_poll(ctx, rule_attr->queue_id, &expected_completions, true); + ret = mlx5hws_bwc_queue_poll(ctx, rule_attr->queue_id, + &expected_completions, true); if (unlikely(ret)) return ret; @@ -382,6 +400,7 @@ int mlx5hws_bwc_rule_destroy_simple(struct mlx5hws_bwc_rule *bwc_rule) mutex_lock(queue_lock); ret = hws_bwc_rule_destroy_hws_sync(bwc_rule, &attr); + atomic_dec(&bwc_matcher->num_of_rules); hws_bwc_rule_list_remove(bwc_rule); mutex_unlock(queue_lock); @@ -391,9 +410,13 @@ int mlx5hws_bwc_rule_destroy_simple(struct mlx5hws_bwc_rule *bwc_rule) int mlx5hws_bwc_rule_destroy(struct mlx5hws_bwc_rule *bwc_rule) { - int ret; + bool is_complex = !!bwc_rule->bwc_matcher->complex; + int ret = 0; - ret = mlx5hws_bwc_rule_destroy_simple(bwc_rule); + if (is_complex) + ret = mlx5hws_bwc_rule_destroy_complex(bwc_rule); + else + ret = mlx5hws_bwc_rule_destroy_simple(bwc_rule); mlx5hws_bwc_rule_free(bwc_rule); return ret; @@ -433,9 +456,8 @@ hws_bwc_rule_create_sync(struct mlx5hws_bwc_rule *bwc_rule, if (unlikely(ret)) return ret; - ret = hws_bwc_queue_poll(ctx, rule_attr->queue_id, &expected_completions, true); - - return ret; + 
return mlx5hws_bwc_queue_poll(ctx, rule_attr->queue_id, + &expected_completions, true); } static int @@ -456,7 +478,8 @@ hws_bwc_rule_update_sync(struct mlx5hws_bwc_rule *bwc_rule, if (unlikely(ret)) return ret; - ret = hws_bwc_queue_poll(ctx, rule_attr->queue_id, &expected_completions, true); + ret = mlx5hws_bwc_queue_poll(ctx, rule_attr->queue_id, + &expected_completions, true); if (unlikely(ret)) mlx5hws_err(ctx, "Failed updating BWC rule (%d)\n", ret); @@ -469,21 +492,9 @@ hws_bwc_matcher_size_maxed_out(struct mlx5hws_bwc_matcher *bwc_matcher) struct mlx5hws_cmd_query_caps *caps = bwc_matcher->matcher->tbl->ctx->caps; /* check the match RTC size */ - if ((bwc_matcher->size_log + - MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH + - MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP) > - (caps->ste_alloc_log_max - 1)) - return true; - - /* check the action RTC size */ - if ((bwc_matcher->size_log + - MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP + - ilog2(roundup_pow_of_two(bwc_matcher->matcher->action_ste.max_stes)) + - MLX5HWS_MATCHER_ACTION_RTC_UPDATE_MULT) > - (caps->ste_alloc_log_max - 1)) - return true; - - return false; + return (bwc_matcher->size_log + MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH + + MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP) > + (caps->ste_alloc_log_max - 1); } static bool @@ -520,6 +531,23 @@ hws_bwc_matcher_extend_at(struct mlx5hws_bwc_matcher *bwc_matcher, struct mlx5hws_rule_action rule_actions[]) { enum mlx5hws_action_type action_types[MLX5HWS_BWC_MAX_ACTS]; + void *p; + + if (unlikely(bwc_matcher->num_of_at >= bwc_matcher->size_of_at_array)) { + if (bwc_matcher->size_of_at_array >= MLX5HWS_MATCHER_MAX_AT) + return -ENOMEM; + bwc_matcher->size_of_at_array *= 2; + p = krealloc(bwc_matcher->at, + bwc_matcher->size_of_at_array * + sizeof(*bwc_matcher->at), + __GFP_ZERO | GFP_KERNEL); + if (!p) { + bwc_matcher->size_of_at_array /= 2; + return -ENOMEM; + } + + bwc_matcher->at = p; + } hws_bwc_rule_actions_to_action_types(rule_actions, action_types); @@ -582,100 +610,79 @@ hws_bwc_matcher_find_at(struct mlx5hws_bwc_matcher *bwc_matcher, static int hws_bwc_matcher_move_all_simple(struct mlx5hws_bwc_matcher *bwc_matcher) { + bool move_error = false, poll_error = false, drain_error = false; struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx; + struct mlx5hws_matcher *matcher = bwc_matcher->matcher; u16 bwc_queues = mlx5hws_bwc_queues(ctx); - struct mlx5hws_bwc_rule **bwc_rules; struct mlx5hws_rule_attr rule_attr; - u32 *pending_rules; - int i, j, ret = 0; - bool all_done; - u16 burst_th; + struct mlx5hws_bwc_rule *bwc_rule; + struct mlx5hws_send_engine *queue; + struct list_head *rules_list; + u32 pending_rules; + int i, ret = 0; mlx5hws_bwc_rule_fill_attr(bwc_matcher, 0, 0, &rule_attr); - pending_rules = kcalloc(bwc_queues, sizeof(*pending_rules), GFP_KERNEL); - if (!pending_rules) - return -ENOMEM; - - bwc_rules = kcalloc(bwc_queues, sizeof(*bwc_rules), GFP_KERNEL); - if (!bwc_rules) { - ret = -ENOMEM; - goto free_pending_rules; - } - for (i = 0; i < bwc_queues; i++) { if (list_empty(&bwc_matcher->rules[i])) - bwc_rules[i] = NULL; - else - bwc_rules[i] = list_first_entry(&bwc_matcher->rules[i], - struct mlx5hws_bwc_rule, - list_node); - } - - do { - all_done = true; + continue; - for (i = 0; i < bwc_queues; i++) { - rule_attr.queue_id = mlx5hws_bwc_get_queue_id(ctx, i); - burst_th = hws_bwc_get_burst_th(ctx, rule_attr.queue_id); + pending_rules = 0; + rule_attr.queue_id = mlx5hws_bwc_get_queue_id(ctx, i); + rules_list = &bwc_matcher->rules[i]; - for (j = 0; j < burst_th && bwc_rules[i]; j++) { - 
rule_attr.burst = !!((j + 1) % burst_th); - ret = mlx5hws_matcher_resize_rule_move(bwc_matcher->matcher, - bwc_rules[i]->rule, - &rule_attr); - if (unlikely(ret)) { - mlx5hws_err(ctx, - "Moving BWC rule failed during rehash (%d)\n", - ret); - goto free_bwc_rules; - } - - all_done = false; - pending_rules[i]++; - bwc_rules[i] = list_is_last(&bwc_rules[i]->list_node, - &bwc_matcher->rules[i]) ? - NULL : list_next_entry(bwc_rules[i], list_node); + list_for_each_entry(bwc_rule, rules_list, list_node) { + ret = mlx5hws_matcher_resize_rule_move(matcher, + bwc_rule->rule, + &rule_attr); + if (unlikely(ret && !move_error)) { + mlx5hws_err(ctx, + "Moving BWC rule: move failed (%d), attempting to move rest of the rules\n", + ret); + move_error = true; + } - ret = hws_bwc_queue_poll(ctx, rule_attr.queue_id, - &pending_rules[i], false); - if (unlikely(ret)) { - mlx5hws_err(ctx, - "Moving BWC rule failed during rehash (%d)\n", - ret); - goto free_bwc_rules; - } + pending_rules++; + ret = mlx5hws_bwc_queue_poll(ctx, + rule_attr.queue_id, + &pending_rules, + false); + if (unlikely(ret && !poll_error)) { + mlx5hws_err(ctx, + "Moving BWC rule: poll failed (%d), attempting to move rest of the rules\n", + ret); + poll_error = true; } } - } while (!all_done); - /* drain all the bwc queues */ - for (i = 0; i < bwc_queues; i++) { - if (pending_rules[i]) { - u16 queue_id = mlx5hws_bwc_get_queue_id(ctx, i); - - mlx5hws_send_engine_flush_queue(&ctx->send_queue[queue_id]); - ret = hws_bwc_queue_poll(ctx, queue_id, - &pending_rules[i], true); - if (unlikely(ret)) { + if (pending_rules) { + queue = &ctx->send_queue[rule_attr.queue_id]; + mlx5hws_send_engine_flush_queue(queue); + ret = mlx5hws_bwc_queue_poll(ctx, + rule_attr.queue_id, + &pending_rules, + true); + if (unlikely(ret && !drain_error)) { mlx5hws_err(ctx, - "Moving BWC rule failed during rehash (%d)\n", ret); - goto free_bwc_rules; + "Moving BWC rule: drain failed (%d), attempting to move rest of the rules\n", + ret); + drain_error = true; } } } -free_bwc_rules: - kfree(bwc_rules); -free_pending_rules: - kfree(pending_rules); + if (move_error || poll_error || drain_error) + ret = -EINVAL; return ret; } static int hws_bwc_matcher_move_all(struct mlx5hws_bwc_matcher *bwc_matcher) { - return hws_bwc_matcher_move_all_simple(bwc_matcher); + if (!bwc_matcher->complex) + return hws_bwc_matcher_move_all_simple(bwc_matcher); + + return mlx5hws_bwc_matcher_move_all_complex(bwc_matcher); } static int hws_bwc_matcher_move(struct mlx5hws_bwc_matcher *bwc_matcher) @@ -686,9 +693,10 @@ static int hws_bwc_matcher_move(struct mlx5hws_bwc_matcher *bwc_matcher) struct mlx5hws_matcher *new_matcher; int ret; - hws_bwc_matcher_init_attr(&matcher_attr, + hws_bwc_matcher_init_attr(bwc_matcher, bwc_matcher->priority, - bwc_matcher->size_log); + bwc_matcher->size_log, + &matcher_attr); old_matcher = bwc_matcher->matcher; new_matcher = mlx5hws_matcher_create(old_matcher->tbl, @@ -708,15 +716,18 @@ static int hws_bwc_matcher_move(struct mlx5hws_bwc_matcher *bwc_matcher) } ret = hws_bwc_matcher_move_all(bwc_matcher); - if (ret) { - mlx5hws_err(ctx, "Rehash error: moving rules failed\n"); - return -ENOMEM; - } + if (ret) + mlx5hws_err(ctx, "Rehash error: moving rules failed, attempting to remove the old matcher\n"); + + /* Error during rehash can't be rolled back. + * The best option here is to allow the rehash to complete and remove + * the old matcher - can't leave the matcher in the 'in_resize' state. 
+ */ bwc_matcher->matcher = new_matcher; mlx5hws_matcher_destroy(old_matcher); - return 0; + return ret; } static int @@ -733,9 +744,9 @@ hws_bwc_matcher_rehash_size(struct mlx5hws_bwc_matcher *bwc_matcher) /* It is possible that other rule has already performed rehash. * Need to check again if we really need rehash. - * If the reason for rehash was size, but not any more - skip rehash. */ - if (!hws_bwc_matcher_rehash_size_needed(bwc_matcher, + if (!atomic_read(&bwc_matcher->rehash_required) && + !hws_bwc_matcher_rehash_size_needed(bwc_matcher, atomic_read(&bwc_matcher->num_of_rules))) return 0; @@ -746,6 +757,8 @@ hws_bwc_matcher_rehash_size(struct mlx5hws_bwc_matcher *bwc_matcher) * - destroy the old matcher */ + atomic_set(&bwc_matcher->rehash_required, false); + ret = hws_bwc_matcher_extend_size(bwc_matcher); if (ret) return ret; @@ -753,17 +766,51 @@ hws_bwc_matcher_rehash_size(struct mlx5hws_bwc_matcher *bwc_matcher) return hws_bwc_matcher_move(bwc_matcher); } -static int -hws_bwc_matcher_rehash_at(struct mlx5hws_bwc_matcher *bwc_matcher) +static int hws_bwc_rule_get_at_idx(struct mlx5hws_bwc_rule *bwc_rule, + struct mlx5hws_rule_action rule_actions[], + u16 bwc_queue_idx) { - /* Rehash by action template doesn't require any additional checking. - * The bwc_matcher already contains the new action template. - * Just do the usual rehash: - * - create new matcher - * - move all the rules to the new matcher - * - destroy the old matcher - */ - return hws_bwc_matcher_move(bwc_matcher); + struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher; + struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx; + struct mutex *queue_lock; /* Protect the queue */ + int at_idx, ret; + + /* check if rehash needed due to missing action template */ + at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions); + if (likely(at_idx >= 0)) + return at_idx; + + /* we need to extend BWC matcher action templates array */ + queue_lock = hws_bwc_get_queue_lock(ctx, bwc_queue_idx); + mutex_unlock(queue_lock); + hws_bwc_lock_all_queues(ctx); + + /* check again - perhaps other thread already did extend_at */ + at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions); + if (at_idx >= 0) + goto out; + + ret = hws_bwc_matcher_extend_at(bwc_matcher, rule_actions); + if (unlikely(ret)) { + mlx5hws_err(ctx, "BWC rule: failed extending AT (%d)", ret); + at_idx = -EINVAL; + goto out; + } + + /* action templates array was extended, we need the last idx */ + at_idx = bwc_matcher->num_of_at - 1; + ret = mlx5hws_matcher_attach_at(bwc_matcher->matcher, + bwc_matcher->at[at_idx]); + if (unlikely(ret)) { + mlx5hws_err(ctx, "BWC rule: failed attaching new AT (%d)", ret); + at_idx = -EINVAL; + goto out; + } + +out: + hws_bwc_unlock_all_queues(ctx); + mutex_lock(queue_lock); + return at_idx; } int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule, @@ -786,50 +833,16 @@ int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule, mutex_lock(queue_lock); - /* check if rehash needed due to missing action template */ - at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions); + at_idx = hws_bwc_rule_get_at_idx(bwc_rule, rule_actions, bwc_queue_idx); if (unlikely(at_idx < 0)) { - /* we need to extend BWC matcher action templates array */ mutex_unlock(queue_lock); - hws_bwc_lock_all_queues(ctx); - - ret = hws_bwc_matcher_extend_at(bwc_matcher, rule_actions); - if (unlikely(ret)) { - hws_bwc_unlock_all_queues(ctx); - return ret; - } - - /* action templates array was extended, we need the last 
idx */ - at_idx = bwc_matcher->num_of_at - 1; - - ret = mlx5hws_matcher_attach_at(bwc_matcher->matcher, - bwc_matcher->at[at_idx]); - if (unlikely(ret)) { - /* Action template attach failed, possibly due to - * requiring more action STEs. - * Need to attempt creating new matcher with all - * the action templates, including the new one. - */ - ret = hws_bwc_matcher_rehash_at(bwc_matcher); - if (unlikely(ret)) { - mlx5hws_action_template_destroy(bwc_matcher->at[at_idx]); - bwc_matcher->at[at_idx] = NULL; - bwc_matcher->num_of_at--; - - hws_bwc_unlock_all_queues(ctx); - - mlx5hws_err(ctx, - "BWC rule insertion: rehash AT failed (%d)\n", ret); - return ret; - } - } - - hws_bwc_unlock_all_queues(ctx); - mutex_lock(queue_lock); + mlx5hws_err(ctx, "BWC rule create: failed getting AT (%d)", + ret); + return -EINVAL; } /* check if number of rules require rehash */ - num_of_rules = atomic_read(&bwc_matcher->num_of_rules); + num_of_rules = atomic_inc_return(&bwc_matcher->num_of_rules); if (unlikely(hws_bwc_matcher_rehash_size_needed(bwc_matcher, num_of_rules))) { mutex_unlock(queue_lock); @@ -843,6 +856,7 @@ int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule, bwc_matcher->size_log - MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP, bwc_matcher->size_log, ret); + atomic_dec(&bwc_matcher->num_of_rules); return ret; } @@ -867,6 +881,7 @@ int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule, * Try rehash by size and insert rule again - last chance. */ + atomic_set(&bwc_matcher->rehash_required, true); mutex_unlock(queue_lock); hws_bwc_lock_all_queues(ctx); @@ -875,6 +890,7 @@ int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule, if (ret) { mlx5hws_err(ctx, "BWC rule insertion: rehash failed (%d)\n", ret); + atomic_dec(&bwc_matcher->num_of_rules); return ret; } @@ -890,6 +906,7 @@ int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule, if (unlikely(ret)) { mutex_unlock(queue_lock); mlx5hws_err(ctx, "BWC rule insertion failed (%d)\n", ret); + atomic_dec(&bwc_matcher->num_of_rules); return ret; } @@ -921,11 +938,18 @@ mlx5hws_bwc_rule_create(struct mlx5hws_bwc_matcher *bwc_matcher, bwc_queue_idx = hws_bwc_gen_queue_idx(ctx); - ret = mlx5hws_bwc_rule_create_simple(bwc_rule, - params->match_buf, - rule_actions, - flow_source, - bwc_queue_idx); + if (bwc_matcher->complex) + ret = mlx5hws_bwc_rule_create_complex(bwc_rule, + params, + flow_source, + rule_actions, + bwc_queue_idx); + else + ret = mlx5hws_bwc_rule_create_simple(bwc_rule, + params->match_buf, + rule_actions, + flow_source, + bwc_queue_idx); if (unlikely(ret)) { mlx5hws_bwc_rule_free(bwc_rule); return NULL; @@ -952,52 +976,11 @@ hws_bwc_rule_action_update(struct mlx5hws_bwc_rule *bwc_rule, mutex_lock(queue_lock); - /* check if rehash needed due to missing action template */ - at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions); + at_idx = hws_bwc_rule_get_at_idx(bwc_rule, rule_actions, idx); if (unlikely(at_idx < 0)) { - /* we need to extend BWC matcher action templates array */ mutex_unlock(queue_lock); - hws_bwc_lock_all_queues(ctx); - - /* check again - perhaps other thread already did extend_at */ - at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions); - if (likely(at_idx < 0)) { - ret = hws_bwc_matcher_extend_at(bwc_matcher, rule_actions); - if (unlikely(ret)) { - hws_bwc_unlock_all_queues(ctx); - mlx5hws_err(ctx, "BWC rule update: failed extending AT (%d)", ret); - return -EINVAL; - } - - /* action templates array was extended, we need the last idx */ - at_idx = 
bwc_matcher->num_of_at - 1; - - ret = mlx5hws_matcher_attach_at(bwc_matcher->matcher, - bwc_matcher->at[at_idx]); - if (unlikely(ret)) { - /* Action template attach failed, possibly due to - * requiring more action STEs. - * Need to attempt creating new matcher with all - * the action templates, including the new one. - */ - ret = hws_bwc_matcher_rehash_at(bwc_matcher); - if (unlikely(ret)) { - mlx5hws_action_template_destroy(bwc_matcher->at[at_idx]); - bwc_matcher->at[at_idx] = NULL; - bwc_matcher->num_of_at--; - - hws_bwc_unlock_all_queues(ctx); - - mlx5hws_err(ctx, - "BWC rule update: rehash AT failed (%d)\n", - ret); - return ret; - } - } - } - - hws_bwc_unlock_all_queues(ctx); - mutex_lock(queue_lock); + mlx5hws_err(ctx, "BWC rule update: failed getting AT\n"); + return -EINVAL; } ret = hws_bwc_rule_update_sync(bwc_rule, @@ -1023,5 +1006,10 @@ int mlx5hws_bwc_rule_action_update(struct mlx5hws_bwc_rule *bwc_rule, return -EINVAL; } - return hws_bwc_rule_action_update(bwc_rule, rule_actions); + /* For complex rule, the update should happen on the second matcher */ + if (bwc_rule->isolated_bwc_rule) + return hws_bwc_rule_action_update(bwc_rule->isolated_bwc_rule, + rule_actions); + else + return hws_bwc_rule_action_update(bwc_rule, rule_actions); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h index 47f7ed141553..d21fc247a510 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h @@ -10,9 +10,7 @@ #define MLX5HWS_BWC_MATCHER_REHASH_BURST_TH 32 /* Max number of AT attach operations for the same matcher. - * When the limit is reached, next attempt to attach new AT - * will result in creation of a new matcher and moving all - * the rules to this matcher. + * When the limit is reached, a larger buffer is allocated for the ATs. 
*/ #define MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM 8 @@ -20,20 +18,27 @@ #define MLX5HWS_BWC_POLLING_TIMEOUT 60 +struct mlx5hws_bwc_matcher_complex_data; struct mlx5hws_bwc_matcher { struct mlx5hws_matcher *matcher; struct mlx5hws_match_template *mt; - struct mlx5hws_action_template *at[MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM]; - u32 priority; + struct mlx5hws_action_template **at; + struct mlx5hws_bwc_matcher_complex_data *complex; + struct mlx5hws_bwc_matcher *complex_first_bwc_matcher; u8 num_of_at; + u8 size_of_at_array; u8 size_log; + u32 priority; atomic_t num_of_rules; + atomic_t rehash_required; struct list_head *rules; }; struct mlx5hws_bwc_rule { struct mlx5hws_bwc_matcher *bwc_matcher; struct mlx5hws_rule *rule; + struct mlx5hws_bwc_rule *isolated_bwc_rule; + struct mlx5hws_bwc_complex_rule_hash_node *complex_hash_node; u16 bwc_queue_idx; struct list_head list_node; }; @@ -65,6 +70,11 @@ void mlx5hws_bwc_rule_fill_attr(struct mlx5hws_bwc_matcher *bwc_matcher, u32 flow_source, struct mlx5hws_rule_attr *rule_attr); +int mlx5hws_bwc_queue_poll(struct mlx5hws_context *ctx, + u16 queue_id, + u32 *pending_rules, + bool drain); + static inline u16 mlx5hws_bwc_queues(struct mlx5hws_context *ctx) { /* Besides the control queue, half of the queues are diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c index 9fb059a6511f..70768953a4f6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c @@ -3,6 +3,22 @@ #include "internal.h" +#define HWS_CLEAR_MATCH_PARAM(mask, field) \ + MLX5_SET(fte_match_param, (mask)->match_buf, field, 0) + +#define HWS_SZ_MATCH_PARAM (MLX5_ST_SZ_DW_MATCH_PARAM * 4) + +static const struct rhashtable_params hws_refcount_hash = { + .key_len = sizeof_field(struct mlx5hws_bwc_complex_rule_hash_node, + match_buf), + .key_offset = offsetof(struct mlx5hws_bwc_complex_rule_hash_node, + match_buf), + .head_offset = offsetof(struct mlx5hws_bwc_complex_rule_hash_node, + hash_node), + .automatic_shrinking = true, + .min_size = 1, +}; + bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx, u8 match_criteria_enable, struct mlx5hws_match_parameters *mask) @@ -48,20 +64,1078 @@ bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx, return is_complex; } +static void +hws_bwc_matcher_complex_params_clear_fld(struct mlx5hws_context *ctx, + enum mlx5hws_definer_fname fname, + struct mlx5hws_match_parameters *mask) +{ + struct mlx5hws_cmd_query_caps *caps = ctx->caps; + + switch (fname) { + case MLX5HWS_DEFINER_FNAME_ETH_TYPE_O: + case MLX5HWS_DEFINER_FNAME_ETH_TYPE_I: + case MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_O: + case MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_I: + case MLX5HWS_DEFINER_FNAME_IP_VERSION_O: + case MLX5HWS_DEFINER_FNAME_IP_VERSION_I: + /* Because of the strict requirements for IP address matching + * that require ethtype/ip_version matching as well, don't clear + * these fields - have them in both parts of the complex matcher + */ + break; + case MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_O: + HWS_CLEAR_MATCH_PARAM(mask, outer_headers.smac_47_16); + break; + case MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_I: + HWS_CLEAR_MATCH_PARAM(mask, inner_headers.smac_47_16); + break; + case MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_O: + HWS_CLEAR_MATCH_PARAM(mask, outer_headers.smac_15_0); + break; + case MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_I: + HWS_CLEAR_MATCH_PARAM(mask, inner_headers.smac_15_0); 
+ break; + case MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_O: + HWS_CLEAR_MATCH_PARAM(mask, outer_headers.dmac_47_16); + break; + case MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_I: + HWS_CLEAR_MATCH_PARAM(mask, inner_headers.dmac_47_16); + break; + case MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_O: + HWS_CLEAR_MATCH_PARAM(mask, outer_headers.dmac_15_0); + break; + case MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_I: + HWS_CLEAR_MATCH_PARAM(mask, inner_headers.dmac_15_0); + break; + case MLX5HWS_DEFINER_FNAME_VLAN_TYPE_O: + HWS_CLEAR_MATCH_PARAM(mask, outer_headers.cvlan_tag); + HWS_CLEAR_MATCH_PARAM(mask, outer_headers.svlan_tag); + break; + case MLX5HWS_DEFINER_FNAME_VLAN_TYPE_I: + HWS_CLEAR_MATCH_PARAM(mask, inner_headers.cvlan_tag); + HWS_CLEAR_MATCH_PARAM(mask, inner_headers.svlan_tag); + break; + case MLX5HWS_DEFINER_FNAME_VLAN_FIRST_PRIO_O: + HWS_CLEAR_MATCH_PARAM(mask, outer_headers.first_prio); + break; + case MLX5HWS_DEFINER_FNAME_VLAN_FIRST_PRIO_I: + HWS_CLEAR_MATCH_PARAM(mask, inner_headers.first_prio); + break; + case MLX5HWS_DEFINER_FNAME_VLAN_CFI_O: + HWS_CLEAR_MATCH_PARAM(mask, outer_headers.first_cfi); + break; + case MLX5HWS_DEFINER_FNAME_VLAN_CFI_I: + HWS_CLEAR_MATCH_PARAM(mask, inner_headers.first_cfi); + break; + case MLX5HWS_DEFINER_FNAME_VLAN_ID_O: + HWS_CLEAR_MATCH_PARAM(mask, outer_headers.first_vid); + break; + case MLX5HWS_DEFINER_FNAME_VLAN_ID_I: + HWS_CLEAR_MATCH_PARAM(mask, inner_headers.first_vid); + break; + case MLX5HWS_DEFINER_FNAME_VLAN_SECOND_TYPE_O: + HWS_CLEAR_MATCH_PARAM(mask, + misc_parameters.outer_second_cvlan_tag); + HWS_CLEAR_MATCH_PARAM(mask, + misc_parameters.outer_second_svlan_tag); + break; + case MLX5HWS_DEFINER_FNAME_VLAN_SECOND_TYPE_I: + HWS_CLEAR_MATCH_PARAM(mask, + misc_parameters.inner_second_cvlan_tag); + HWS_CLEAR_MATCH_PARAM(mask, + misc_parameters.inner_second_svlan_tag); + break; + case MLX5HWS_DEFINER_FNAME_VLAN_SECOND_PRIO_O: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.outer_second_prio); + break; + case MLX5HWS_DEFINER_FNAME_VLAN_SECOND_PRIO_I: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.inner_second_prio); + break; + case MLX5HWS_DEFINER_FNAME_VLAN_SECOND_CFI_O: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.outer_second_cfi); + break; + case MLX5HWS_DEFINER_FNAME_VLAN_SECOND_CFI_I: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.inner_second_cfi); + break; + case MLX5HWS_DEFINER_FNAME_VLAN_SECOND_ID_O: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.outer_second_vid); + break; + case MLX5HWS_DEFINER_FNAME_VLAN_SECOND_ID_I: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.inner_second_vid); + break; + case MLX5HWS_DEFINER_FNAME_IPV4_IHL_O: + HWS_CLEAR_MATCH_PARAM(mask, outer_headers.ipv4_ihl); + break; + case MLX5HWS_DEFINER_FNAME_IPV4_IHL_I: + HWS_CLEAR_MATCH_PARAM(mask, inner_headers.ipv4_ihl); + break; + case MLX5HWS_DEFINER_FNAME_IP_DSCP_O: + HWS_CLEAR_MATCH_PARAM(mask, outer_headers.ip_dscp); + break; + case MLX5HWS_DEFINER_FNAME_IP_DSCP_I: + HWS_CLEAR_MATCH_PARAM(mask, inner_headers.ip_dscp); + break; + case MLX5HWS_DEFINER_FNAME_IP_ECN_O: + HWS_CLEAR_MATCH_PARAM(mask, outer_headers.ip_ecn); + break; + case MLX5HWS_DEFINER_FNAME_IP_ECN_I: + HWS_CLEAR_MATCH_PARAM(mask, inner_headers.ip_ecn); + break; + case MLX5HWS_DEFINER_FNAME_IP_TTL_O: + HWS_CLEAR_MATCH_PARAM(mask, outer_headers.ttl_hoplimit); + break; + case MLX5HWS_DEFINER_FNAME_IP_TTL_I: + HWS_CLEAR_MATCH_PARAM(mask, inner_headers.ttl_hoplimit); + break; + case MLX5HWS_DEFINER_FNAME_IPV4_DST_O: + HWS_CLEAR_MATCH_PARAM(mask, + outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0); + break; + 
case MLX5HWS_DEFINER_FNAME_IPV4_SRC_O: + HWS_CLEAR_MATCH_PARAM(mask, + outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0); + break; + case MLX5HWS_DEFINER_FNAME_IPV4_DST_I: + HWS_CLEAR_MATCH_PARAM(mask, + inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0); + break; + case MLX5HWS_DEFINER_FNAME_IPV4_SRC_I: + HWS_CLEAR_MATCH_PARAM(mask, + inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0); + break; + case MLX5HWS_DEFINER_FNAME_IP_FRAG_O: + HWS_CLEAR_MATCH_PARAM(mask, outer_headers.frag); + break; + case MLX5HWS_DEFINER_FNAME_IP_FRAG_I: + HWS_CLEAR_MATCH_PARAM(mask, inner_headers.frag); + break; + case MLX5HWS_DEFINER_FNAME_IPV6_FLOW_LABEL_O: + HWS_CLEAR_MATCH_PARAM(mask, + misc_parameters.outer_ipv6_flow_label); + break; + case MLX5HWS_DEFINER_FNAME_IPV6_FLOW_LABEL_I: + HWS_CLEAR_MATCH_PARAM(mask, + misc_parameters.inner_ipv6_flow_label); + break; + case MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_O: + case MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_O: + case MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_O: + case MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_O: + HWS_CLEAR_MATCH_PARAM(mask, + outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_127_96); + HWS_CLEAR_MATCH_PARAM(mask, + outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_95_64); + HWS_CLEAR_MATCH_PARAM(mask, + outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_63_32); + HWS_CLEAR_MATCH_PARAM(mask, + outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0); + break; + case MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_O: + case MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_O: + case MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_O: + case MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_O: + HWS_CLEAR_MATCH_PARAM(mask, + outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_127_96); + HWS_CLEAR_MATCH_PARAM(mask, + outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_95_64); + HWS_CLEAR_MATCH_PARAM(mask, + outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_63_32); + HWS_CLEAR_MATCH_PARAM(mask, + outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0); + break; + case MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_I: + case MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_I: + case MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_I: + case MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_I: + HWS_CLEAR_MATCH_PARAM(mask, + inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_127_96); + HWS_CLEAR_MATCH_PARAM(mask, + inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_95_64); + HWS_CLEAR_MATCH_PARAM(mask, + inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_63_32); + HWS_CLEAR_MATCH_PARAM(mask, + inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0); + break; + case MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_I: + case MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_I: + case MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_I: + case MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_I: + HWS_CLEAR_MATCH_PARAM(mask, + inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_127_96); + HWS_CLEAR_MATCH_PARAM(mask, + inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_95_64); + HWS_CLEAR_MATCH_PARAM(mask, + inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_63_32); + HWS_CLEAR_MATCH_PARAM(mask, + inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0); + break; + case MLX5HWS_DEFINER_FNAME_IP_PROTOCOL_O: + HWS_CLEAR_MATCH_PARAM(mask, outer_headers.ip_protocol); + break; + case MLX5HWS_DEFINER_FNAME_IP_PROTOCOL_I: + HWS_CLEAR_MATCH_PARAM(mask, inner_headers.ip_protocol); + break; + case MLX5HWS_DEFINER_FNAME_L4_SPORT_O: + HWS_CLEAR_MATCH_PARAM(mask, outer_headers.tcp_sport); + 
HWS_CLEAR_MATCH_PARAM(mask, outer_headers.udp_sport); + break; + case MLX5HWS_DEFINER_FNAME_L4_SPORT_I: + HWS_CLEAR_MATCH_PARAM(mask, inner_headers.tcp_sport); + HWS_CLEAR_MATCH_PARAM(mask, inner_headers.udp_sport); + break; + case MLX5HWS_DEFINER_FNAME_L4_DPORT_O: + HWS_CLEAR_MATCH_PARAM(mask, outer_headers.tcp_dport); + HWS_CLEAR_MATCH_PARAM(mask, outer_headers.udp_dport); + break; + case MLX5HWS_DEFINER_FNAME_L4_DPORT_I: + HWS_CLEAR_MATCH_PARAM(mask, inner_headers.tcp_dport); + HWS_CLEAR_MATCH_PARAM(mask, inner_headers.udp_dport); + break; + case MLX5HWS_DEFINER_FNAME_TCP_FLAGS_O: + HWS_CLEAR_MATCH_PARAM(mask, outer_headers.tcp_flags); + break; + case MLX5HWS_DEFINER_FNAME_TCP_ACK_NUM: + case MLX5HWS_DEFINER_FNAME_TCP_SEQ_NUM: + HWS_CLEAR_MATCH_PARAM(mask, + misc_parameters_3.outer_tcp_seq_num); + HWS_CLEAR_MATCH_PARAM(mask, + misc_parameters_3.outer_tcp_ack_num); + HWS_CLEAR_MATCH_PARAM(mask, + misc_parameters_3.inner_tcp_seq_num); + HWS_CLEAR_MATCH_PARAM(mask, + misc_parameters_3.inner_tcp_ack_num); + break; + case MLX5HWS_DEFINER_FNAME_GTP_TEID: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.gtpu_teid); + break; + case MLX5HWS_DEFINER_FNAME_GTP_MSG_TYPE: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.gtpu_msg_type); + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.gtpu_msg_flags); + break; + case MLX5HWS_DEFINER_FNAME_GTPU_FIRST_EXT_DW0: + HWS_CLEAR_MATCH_PARAM(mask, + misc_parameters_3.gtpu_first_ext_dw_0); + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.gtpu_dw_0); + break; + case MLX5HWS_DEFINER_FNAME_GTPU_DW2: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.gtpu_dw_2); + break; + case MLX5HWS_DEFINER_FNAME_FLEX_PARSER_0: + case MLX5HWS_DEFINER_FNAME_FLEX_PARSER_1: + case MLX5HWS_DEFINER_FNAME_FLEX_PARSER_2: + case MLX5HWS_DEFINER_FNAME_FLEX_PARSER_3: + case MLX5HWS_DEFINER_FNAME_FLEX_PARSER_4: + case MLX5HWS_DEFINER_FNAME_FLEX_PARSER_5: + case MLX5HWS_DEFINER_FNAME_FLEX_PARSER_6: + case MLX5HWS_DEFINER_FNAME_FLEX_PARSER_7: + HWS_CLEAR_MATCH_PARAM(mask, + misc_parameters_2.outer_first_mpls_over_gre); + HWS_CLEAR_MATCH_PARAM(mask, + misc_parameters_2.outer_first_mpls_over_udp); + HWS_CLEAR_MATCH_PARAM(mask, + misc_parameters_3.geneve_tlv_option_0_data); + HWS_CLEAR_MATCH_PARAM(mask, + misc_parameters_4.prog_sample_field_id_0); + HWS_CLEAR_MATCH_PARAM(mask, + misc_parameters_4.prog_sample_field_value_0); + HWS_CLEAR_MATCH_PARAM(mask, + misc_parameters_4.prog_sample_field_value_1); + HWS_CLEAR_MATCH_PARAM(mask, + misc_parameters_4.prog_sample_field_id_2); + HWS_CLEAR_MATCH_PARAM(mask, + misc_parameters_4.prog_sample_field_value_2); + HWS_CLEAR_MATCH_PARAM(mask, + misc_parameters_4.prog_sample_field_id_3); + HWS_CLEAR_MATCH_PARAM(mask, + misc_parameters_4.prog_sample_field_value_3); + break; + case MLX5HWS_DEFINER_FNAME_VXLAN_VNI: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.vxlan_vni); + break; + case MLX5HWS_DEFINER_FNAME_VXLAN_GPE_FLAGS: + HWS_CLEAR_MATCH_PARAM(mask, + misc_parameters_3.outer_vxlan_gpe_flags); + break; + case MLX5HWS_DEFINER_FNAME_VXLAN_GPE_RSVD0: + break; + case MLX5HWS_DEFINER_FNAME_VXLAN_GPE_PROTO: + HWS_CLEAR_MATCH_PARAM(mask, + misc_parameters_3.outer_vxlan_gpe_next_protocol); + break; + case MLX5HWS_DEFINER_FNAME_VXLAN_GPE_VNI: + HWS_CLEAR_MATCH_PARAM(mask, + misc_parameters_3.outer_vxlan_gpe_vni); + break; + case MLX5HWS_DEFINER_FNAME_GENEVE_OPT_LEN: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.geneve_opt_len); + break; + case MLX5HWS_DEFINER_FNAME_GENEVE_OAM: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.geneve_oam); + break; + case 
MLX5HWS_DEFINER_FNAME_GENEVE_PROTO: + HWS_CLEAR_MATCH_PARAM(mask, + misc_parameters.geneve_protocol_type); + break; + case MLX5HWS_DEFINER_FNAME_GENEVE_VNI: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.geneve_vni); + break; + case MLX5HWS_DEFINER_FNAME_SOURCE_QP: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.source_sqn); + break; + case MLX5HWS_DEFINER_FNAME_SOURCE_GVMI: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.source_port); + HWS_CLEAR_MATCH_PARAM(mask, + misc_parameters.source_eswitch_owner_vhca_id); + break; + case MLX5HWS_DEFINER_FNAME_REG_0: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.metadata_reg_c_0); + break; + case MLX5HWS_DEFINER_FNAME_REG_1: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.metadata_reg_c_1); + break; + case MLX5HWS_DEFINER_FNAME_REG_2: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.metadata_reg_c_2); + break; + case MLX5HWS_DEFINER_FNAME_REG_3: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.metadata_reg_c_3); + break; + case MLX5HWS_DEFINER_FNAME_REG_4: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.metadata_reg_c_4); + break; + case MLX5HWS_DEFINER_FNAME_REG_5: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.metadata_reg_c_5); + break; + case MLX5HWS_DEFINER_FNAME_REG_7: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.metadata_reg_c_7); + break; + case MLX5HWS_DEFINER_FNAME_REG_A: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.metadata_reg_a); + break; + case MLX5HWS_DEFINER_FNAME_GRE_C: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.gre_c_present); + break; + case MLX5HWS_DEFINER_FNAME_GRE_K: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.gre_k_present); + break; + case MLX5HWS_DEFINER_FNAME_GRE_S: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.gre_s_present); + break; + case MLX5HWS_DEFINER_FNAME_GRE_PROTOCOL: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.gre_protocol); + break; + case MLX5HWS_DEFINER_FNAME_GRE_OPT_KEY: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.gre_key.key); + break; + case MLX5HWS_DEFINER_FNAME_ICMP_DW1: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.icmp_header_data); + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.icmp_type); + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.icmp_code); + HWS_CLEAR_MATCH_PARAM(mask, + misc_parameters_3.icmpv6_header_data); + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.icmpv6_type); + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.icmpv6_code); + break; + case MLX5HWS_DEFINER_FNAME_MPLS0_O: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.outer_first_mpls); + break; + case MLX5HWS_DEFINER_FNAME_MPLS0_I: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.inner_first_mpls); + break; + case MLX5HWS_DEFINER_FNAME_TNL_HDR_0: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_5.tunnel_header_0); + break; + case MLX5HWS_DEFINER_FNAME_TNL_HDR_1: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_5.tunnel_header_1); + break; + case MLX5HWS_DEFINER_FNAME_TNL_HDR_2: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_5.tunnel_header_2); + break; + case MLX5HWS_DEFINER_FNAME_TNL_HDR_3: + HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_5.tunnel_header_3); + break; + case MLX5HWS_DEFINER_FNAME_FLEX_PARSER0_OK: + case MLX5HWS_DEFINER_FNAME_FLEX_PARSER1_OK: + case MLX5HWS_DEFINER_FNAME_FLEX_PARSER2_OK: + case MLX5HWS_DEFINER_FNAME_FLEX_PARSER3_OK: + case MLX5HWS_DEFINER_FNAME_FLEX_PARSER4_OK: + case MLX5HWS_DEFINER_FNAME_FLEX_PARSER5_OK: + case MLX5HWS_DEFINER_FNAME_FLEX_PARSER6_OK: + case MLX5HWS_DEFINER_FNAME_FLEX_PARSER7_OK: + /* assuming this is flex parser for geneve option */ + if ((fname == 
MLX5HWS_DEFINER_FNAME_FLEX_PARSER0_OK && + ctx->caps->flex_parser_id_geneve_tlv_option_0 != 0) || + (fname == MLX5HWS_DEFINER_FNAME_FLEX_PARSER1_OK && + ctx->caps->flex_parser_id_geneve_tlv_option_0 != 1) || + (fname == MLX5HWS_DEFINER_FNAME_FLEX_PARSER2_OK && + ctx->caps->flex_parser_id_geneve_tlv_option_0 != 2) || + (fname == MLX5HWS_DEFINER_FNAME_FLEX_PARSER3_OK && + ctx->caps->flex_parser_id_geneve_tlv_option_0 != 3) || + (fname == MLX5HWS_DEFINER_FNAME_FLEX_PARSER4_OK && + ctx->caps->flex_parser_id_geneve_tlv_option_0 != 4) || + (fname == MLX5HWS_DEFINER_FNAME_FLEX_PARSER5_OK && + ctx->caps->flex_parser_id_geneve_tlv_option_0 != 5) || + (fname == MLX5HWS_DEFINER_FNAME_FLEX_PARSER6_OK && + ctx->caps->flex_parser_id_geneve_tlv_option_0 != 6) || + (fname == MLX5HWS_DEFINER_FNAME_FLEX_PARSER7_OK && + ctx->caps->flex_parser_id_geneve_tlv_option_0 != 7)) { + mlx5hws_err(ctx, + "Complex params: unsupported field %s (%d), flex parser ID for geneve is %d\n", + mlx5hws_definer_fname_to_str(fname), fname, + caps->flex_parser_id_geneve_tlv_option_0); + break; + } + HWS_CLEAR_MATCH_PARAM(mask, + misc_parameters.geneve_tlv_option_0_exist); + break; + case MLX5HWS_DEFINER_FNAME_REG_6: + default: + mlx5hws_err(ctx, "Complex params: unsupported field %s (%d)\n", + mlx5hws_definer_fname_to_str(fname), fname); + break; + } +} + +static bool +hws_bwc_matcher_complex_params_comb_is_valid(struct mlx5hws_definer_fc *fc, + int fc_sz, + u32 combination_num) +{ + bool m1[MLX5HWS_DEFINER_FNAME_MAX] = {0}; + bool m2[MLX5HWS_DEFINER_FNAME_MAX] = {0}; + bool is_first_matcher; + int i; + + for (i = 0; i < fc_sz; i++) { + is_first_matcher = !(combination_num & BIT(i)); + if (is_first_matcher) + m1[fc[i].fname] = true; + else + m2[fc[i].fname] = true; + } + + /* Not all the fields can be split into separate matchers. + * Some should be together on the same matcher. + * For example, IPv6 parts - the whole IPv6 address should be on the + * same matcher in order for us to deduce if it's IPv6 or IPv4 address. + */ + if (m1[MLX5HWS_DEFINER_FNAME_IP_FRAG_O] && + (m2[MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_O] || + m2[MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_O] || + m2[MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_O] || + m2[MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_O])) + return false; + + if (m2[MLX5HWS_DEFINER_FNAME_IP_FRAG_O] && + (m1[MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_O] || + m1[MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_O] || + m1[MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_O] || + m1[MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_O])) + return false; + + if (m1[MLX5HWS_DEFINER_FNAME_IP_FRAG_I] && + (m2[MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_I] || + m2[MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_I] || + m2[MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_I] || + m2[MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_I])) + return false; + + if (m2[MLX5HWS_DEFINER_FNAME_IP_FRAG_I] && + (m1[MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_I] || + m1[MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_I] || + m1[MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_I] || + m1[MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_I])) + return false; + + /* Don't split outer IPv6 dest address. */ + if ((m1[MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_O] || + m1[MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_O] || + m1[MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_O] || + m1[MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_O]) && + (m2[MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_O] || + m2[MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_O] || + m2[MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_O] || + m2[MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_O])) + return false; + + /* Don't split outer IPv6 source address. 
*/ + if ((m1[MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_O] || + m1[MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_O] || + m1[MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_O] || + m1[MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_O]) && + (m2[MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_O] || + m2[MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_O] || + m2[MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_O] || + m2[MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_O])) + return false; + + /* Don't split inner IPv6 dest address. */ + if ((m1[MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_I] || + m1[MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_I] || + m1[MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_I] || + m1[MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_I]) && + (m2[MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_I] || + m2[MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_I] || + m2[MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_I] || + m2[MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_I])) + return false; + + /* Don't split inner IPv6 source address. */ + if ((m1[MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_I] || + m1[MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_I] || + m1[MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_I] || + m1[MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_I]) && + (m2[MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_I] || + m2[MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_I] || + m2[MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_I] || + m2[MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_I])) + return false; + + /* Don't split GRE parameters. */ + if ((m1[MLX5HWS_DEFINER_FNAME_GRE_C] || + m1[MLX5HWS_DEFINER_FNAME_GRE_K] || + m1[MLX5HWS_DEFINER_FNAME_GRE_S] || + m1[MLX5HWS_DEFINER_FNAME_GRE_PROTOCOL]) && + (m2[MLX5HWS_DEFINER_FNAME_GRE_C] || + m2[MLX5HWS_DEFINER_FNAME_GRE_K] || + m2[MLX5HWS_DEFINER_FNAME_GRE_S] || + m2[MLX5HWS_DEFINER_FNAME_GRE_PROTOCOL])) + return false; + + /* Don't split TCP ack/seq numbers. */ + if ((m1[MLX5HWS_DEFINER_FNAME_TCP_ACK_NUM] || + m1[MLX5HWS_DEFINER_FNAME_TCP_SEQ_NUM]) && + (m2[MLX5HWS_DEFINER_FNAME_TCP_ACK_NUM] || + m2[MLX5HWS_DEFINER_FNAME_TCP_SEQ_NUM])) + return false; + + /* Don't split flex parser. */ + if ((m1[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_0] || + m1[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_1] || + m1[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_2] || + m1[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_3] || + m1[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_4] || + m1[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_5] || + m1[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_6] || + m1[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_7]) && + (m2[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_0] || + m2[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_1] || + m2[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_2] || + m2[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_3] || + m2[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_4] || + m2[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_5] || + m2[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_6] || + m2[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_7])) + return false; + + return true; +} + +static void +hws_bwc_matcher_complex_params_comb_create(struct mlx5hws_context *ctx, + struct mlx5hws_match_parameters *m, + struct mlx5hws_match_parameters *m1, + struct mlx5hws_match_parameters *m2, + struct mlx5hws_definer_fc *fc, + int fc_sz, + u32 combination_num) +{ + bool is_first_matcher; + int i; + + memcpy(m1->match_buf, m->match_buf, m->match_sz); + memcpy(m2->match_buf, m->match_buf, m->match_sz); + + for (i = 0; i < fc_sz; i++) { + is_first_matcher = !(combination_num & BIT(i)); + hws_bwc_matcher_complex_params_clear_fld(ctx, + fc[i].fname, + is_first_matcher ? 
+ m2 : m1); + } + + MLX5_SET(fte_match_param, m2->match_buf, + misc_parameters_2.metadata_reg_c_6, -1); +} + +static void +hws_bwc_matcher_complex_params_destroy(struct mlx5hws_match_parameters *mask_1, + struct mlx5hws_match_parameters *mask_2) +{ + kfree(mask_1->match_buf); + kfree(mask_2->match_buf); +} + +static int +hws_bwc_matcher_complex_params_create(struct mlx5hws_context *ctx, + u8 match_criteria, + struct mlx5hws_match_parameters *mask, + struct mlx5hws_match_parameters *mask_1, + struct mlx5hws_match_parameters *mask_2) +{ + struct mlx5hws_definer_fc *fc; + u32 num_of_combinations; + int fc_sz = 0; + int res = 0; + u32 i; + + if (MLX5_GET(fte_match_param, mask->match_buf, + misc_parameters_2.metadata_reg_c_6)) { + mlx5hws_err(ctx, "Complex matcher: REG_C_6 matching is reserved\n"); + res = -EINVAL; + goto out; + } + + mask_1->match_buf = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), + GFP_KERNEL); + mask_2->match_buf = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), + GFP_KERNEL); + if (!mask_1->match_buf || !mask_2->match_buf) { + mlx5hws_err(ctx, "Complex matcher: failed to allocate match_param\n"); + res = -ENOMEM; + goto free_params; + } + + mask_1->match_sz = mask->match_sz; + mask_2->match_sz = mask->match_sz; + + fc = mlx5hws_definer_conv_match_params_to_compressed_fc(ctx, + match_criteria, + mask->match_buf, + &fc_sz); + if (!fc) { + res = -ENOMEM; + goto free_params; + } + + if (fc_sz >= sizeof(num_of_combinations) * BITS_PER_BYTE) { + mlx5hws_err(ctx, + "Complex matcher: too many match parameters (%d)\n", + fc_sz); + res = -EINVAL; + goto free_fc; + } + + /* We have list of all the match fields from the match parameter. + * Now try all the possibilities of splitting them into two match + * buffers and look for the supported combination. + */ + num_of_combinations = 1 << fc_sz; + + /* Start from combination at index 1 - we know that 0 is unsupported */ + for (i = 1; i < num_of_combinations; i++) { + if (!hws_bwc_matcher_complex_params_comb_is_valid(fc, fc_sz, i)) + continue; + + hws_bwc_matcher_complex_params_comb_create(ctx, + mask, mask_1, mask_2, + fc, fc_sz, i); + /* We now have two separate sets of match params. + * Check if each of them can be used in its own matcher. + */ + if (!mlx5hws_bwc_match_params_is_complex(ctx, + match_criteria, + mask_1) && + !mlx5hws_bwc_match_params_is_complex(ctx, + match_criteria, + mask_2)) + break; + } + + if (i == num_of_combinations) { + /* We've scanned all the combinations, but to no avail */ + mlx5hws_err(ctx, "Complex matcher: couldn't find match params combination\n"); + res = -EINVAL; + goto free_fc; + } + + kfree(fc); + return 0; + +free_fc: + kfree(fc); +free_params: + hws_bwc_matcher_complex_params_destroy(mask_1, mask_2); +out: + return res; +} + +static int +hws_bwc_isolated_table_create(struct mlx5hws_bwc_matcher *bwc_matcher, + struct mlx5hws_table *table) +{ + struct mlx5hws_cmd_ft_modify_attr ft_attr = {0}; + struct mlx5hws_context *ctx = table->ctx; + struct mlx5hws_table_attr tbl_attr = {0}; + struct mlx5hws_table *isolated_tbl; + int ret = 0; + + tbl_attr.type = table->type; + tbl_attr.level = table->level; + + bwc_matcher->complex->isolated_tbl = + mlx5hws_table_create(ctx, &tbl_attr); + isolated_tbl = bwc_matcher->complex->isolated_tbl; + if (!isolated_tbl) + return -EINVAL; + + /* Set the default miss of the isolated table to + * point to the end anchor of the original matcher. 
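
The combination search in hws_bwc_matcher_complex_params_create() can be pictured with a small user-space sketch: every bit of the combination number assigns one match field to either the first or the second matcher, and all 2^n assignments are scanned until one passes the validity checks (combination 0, everything on one matcher, is skipped because it is known to be unsupported). The names below (split_is_valid, field_ids) are illustrative placeholders, not driver symbols.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the per-combination checks: "can these two halves really
 * live on two separate matchers?"
 */
static bool split_is_valid(const int *field_ids, int n, unsigned int comb)
{
	(void)field_ids;
	(void)n;
	return comb != 0;	/* combination 0 is never acceptable */
}

int main(void)
{
	int field_ids[] = { 10, 20, 30 };	/* pretend compressed field list */
	int n = 3;
	unsigned int comb, num_comb = 1u << n;

	for (comb = 1; comb < num_comb; comb++) {
		if (!split_is_valid(field_ids, n, comb))
			continue;
		for (int i = 0; i < n; i++)
			printf("field %d -> matcher %c\n", field_ids[i],
			       (comb & (1u << i)) ? 'B' : 'A');
		break;	/* first acceptable split wins */
	}
	return 0;
}

The driver does the same scan over its compressed field-copy array, only with real per-field constraints (IPv6 halves, GRE flags, TCP seq/ack, flex parsers) instead of the trivial check above.
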
+ */ + mlx5hws_cmd_set_attr_connect_miss_tbl(ctx, + isolated_tbl->fw_ft_type, + isolated_tbl->type, + &ft_attr); + ft_attr.table_miss_id = bwc_matcher->matcher->end_ft_id; + + ret = mlx5hws_cmd_flow_table_modify(ctx->mdev, + &ft_attr, + isolated_tbl->ft_id); + if (ret) { + mlx5hws_err(ctx, "Failed setting isolated tbl default miss\n"); + goto destroy_tbl; + } + + return 0; + +destroy_tbl: + mlx5hws_table_destroy(isolated_tbl); + return ret; +} + +static void hws_bwc_isolated_table_destroy(struct mlx5hws_table *isolated_tbl) +{ + /* This table is isolated - no table is pointing to it, no need to + * disconnect it from anywhere, it won't affect any other table's miss. + */ + mlx5hws_table_destroy(isolated_tbl); +} + +static int +hws_bwc_isolated_matcher_create(struct mlx5hws_bwc_matcher *bwc_matcher, + struct mlx5hws_table *table, + u8 match_criteria_enable, + struct mlx5hws_match_parameters *mask) +{ + struct mlx5hws_table *isolated_tbl = bwc_matcher->complex->isolated_tbl; + struct mlx5hws_bwc_matcher *isolated_bwc_matcher; + struct mlx5hws_context *ctx = table->ctx; + int ret; + + isolated_bwc_matcher = kzalloc(sizeof(*bwc_matcher), GFP_KERNEL); + if (!isolated_bwc_matcher) + return -ENOMEM; + + bwc_matcher->complex->isolated_bwc_matcher = isolated_bwc_matcher; + + /* Isolated BWC matcher needs access to the first BWC matcher */ + isolated_bwc_matcher->complex_first_bwc_matcher = bwc_matcher; + + /* Isolated matcher needs to match on REG_C_6, + * so make sure its criteria bit is on. + */ + match_criteria_enable |= MLX5HWS_DEFINER_MATCH_CRITERIA_MISC2; + + ret = mlx5hws_bwc_matcher_create_simple(isolated_bwc_matcher, + isolated_tbl, + 0, + match_criteria_enable, + mask, + NULL); + if (ret) { + mlx5hws_err(ctx, "Complex matcher: failed creating isolated BWC matcher\n"); + goto free_matcher; + } + + return 0; + +free_matcher: + kfree(bwc_matcher->complex->isolated_bwc_matcher); + return ret; +} + +static void +hws_bwc_isolated_matcher_destroy(struct mlx5hws_bwc_matcher *bwc_matcher) +{ + mlx5hws_bwc_matcher_destroy_simple(bwc_matcher); + kfree(bwc_matcher); +} + +static int +hws_bwc_isolated_actions_create(struct mlx5hws_bwc_matcher *bwc_matcher, + struct mlx5hws_table *table) +{ + struct mlx5hws_table *isolated_tbl = bwc_matcher->complex->isolated_tbl; + u8 modify_hdr_action[MLX5_ST_SZ_BYTES(set_action_in)] = {0}; + struct mlx5hws_context *ctx = table->ctx; + struct mlx5hws_action_mh_pattern ptrn; + int ret = 0; + + /* Create action to jump to isolated table */ + + bwc_matcher->complex->action_go_to_tbl = + mlx5hws_action_create_dest_table(ctx, + isolated_tbl, + MLX5HWS_ACTION_FLAG_HWS_FDB); + if (!bwc_matcher->complex->action_go_to_tbl) { + mlx5hws_err(ctx, "Complex matcher: failed to create go-to-tbl action\n"); + return -EINVAL; + } + + /* Create modify header action to set REG_C_6 */ + + MLX5_SET(set_action_in, modify_hdr_action, + action_type, MLX5_MODIFICATION_TYPE_SET); + MLX5_SET(set_action_in, modify_hdr_action, + field, MLX5_MODI_META_REG_C_6); + MLX5_SET(set_action_in, modify_hdr_action, + length, 0); /* zero means length of 32 */ + MLX5_SET(set_action_in, modify_hdr_action, offset, 0); + MLX5_SET(set_action_in, modify_hdr_action, data, 0); + + ptrn.data = (void *)modify_hdr_action; + ptrn.sz = MLX5HWS_ACTION_DOUBLE_SIZE; + + bwc_matcher->complex->action_metadata = + mlx5hws_action_create_modify_header(ctx, 1, &ptrn, 0, + MLX5HWS_ACTION_FLAG_HWS_FDB); + if (!bwc_matcher->complex->action_metadata) { + ret = -EINVAL; + goto destroy_action_go_to_tbl; + } + + /* Create last action */ 
+ + bwc_matcher->complex->action_last = + mlx5hws_action_create_last(ctx, MLX5HWS_ACTION_FLAG_HWS_FDB); + if (!bwc_matcher->complex->action_last) { + mlx5hws_err(ctx, "Complex matcher: failed to create last action\n"); + ret = -EINVAL; + goto destroy_action_metadata; + } + + return 0; + +destroy_action_metadata: + mlx5hws_action_destroy(bwc_matcher->complex->action_metadata); +destroy_action_go_to_tbl: + mlx5hws_action_destroy(bwc_matcher->complex->action_go_to_tbl); + return ret; +} + +static void +hws_bwc_isolated_actions_destroy(struct mlx5hws_bwc_matcher *bwc_matcher) +{ + mlx5hws_action_destroy(bwc_matcher->complex->action_last); + mlx5hws_action_destroy(bwc_matcher->complex->action_metadata); + mlx5hws_action_destroy(bwc_matcher->complex->action_go_to_tbl); +} + int mlx5hws_bwc_matcher_create_complex(struct mlx5hws_bwc_matcher *bwc_matcher, struct mlx5hws_table *table, u32 priority, u8 match_criteria_enable, struct mlx5hws_match_parameters *mask) { - mlx5hws_err(table->ctx, "Complex matcher is not supported yet\n"); - return -EOPNOTSUPP; + enum mlx5hws_action_type complex_init_action_types[3]; + struct mlx5hws_bwc_matcher *isolated_bwc_matcher; + struct mlx5hws_match_parameters mask_1 = {0}; + struct mlx5hws_match_parameters mask_2 = {0}; + struct mlx5hws_context *ctx = table->ctx; + int ret; + + ret = hws_bwc_matcher_complex_params_create(table->ctx, + match_criteria_enable, + mask, &mask_1, &mask_2); + if (ret) + goto err; + + bwc_matcher->complex = + kzalloc(sizeof(*bwc_matcher->complex), GFP_KERNEL); + if (!bwc_matcher->complex) { + ret = -ENOMEM; + goto free_masks; + } + + ret = rhashtable_init(&bwc_matcher->complex->refcount_hash, + &hws_refcount_hash); + if (ret) { + mlx5hws_err(ctx, "Complex matcher: failed to initialize rhashtable\n"); + goto free_complex; + } + + mutex_init(&bwc_matcher->complex->hash_lock); + ida_init(&bwc_matcher->complex->metadata_ida); + + /* Create initial action template for the first matcher. + * Usually the initial AT is just dummy, but in case of complex + * matcher we know exactly which actions should it have. 
+ */ + + complex_init_action_types[0] = MLX5HWS_ACTION_TYP_MODIFY_HDR; + complex_init_action_types[1] = MLX5HWS_ACTION_TYP_TBL; + complex_init_action_types[2] = MLX5HWS_ACTION_TYP_LAST; + + /* Create the first matcher */ + + ret = mlx5hws_bwc_matcher_create_simple(bwc_matcher, + table, + priority, + match_criteria_enable, + &mask_1, + complex_init_action_types); + if (ret) + goto destroy_ida; + + /* Create isolated table to hold the second isolated matcher */ + + ret = hws_bwc_isolated_table_create(bwc_matcher, table); + if (ret) { + mlx5hws_err(ctx, "Complex matcher: failed creating isolated table\n"); + goto destroy_first_matcher; + } + + /* Now create the second BWC matcher - the isolated one */ + + ret = hws_bwc_isolated_matcher_create(bwc_matcher, table, + match_criteria_enable, &mask_2); + if (ret) { + mlx5hws_err(ctx, "Complex matcher: failed creating isolated matcher\n"); + goto destroy_isolated_tbl; + } + + /* Create action for isolated matcher's rules */ + + ret = hws_bwc_isolated_actions_create(bwc_matcher, table); + if (ret) { + mlx5hws_err(ctx, "Complex matcher: failed creating isolated actions\n"); + goto destroy_isolated_matcher; + } + + hws_bwc_matcher_complex_params_destroy(&mask_1, &mask_2); + return 0; + +destroy_isolated_matcher: + isolated_bwc_matcher = bwc_matcher->complex->isolated_bwc_matcher; + hws_bwc_isolated_matcher_destroy(isolated_bwc_matcher); +destroy_isolated_tbl: + hws_bwc_isolated_table_destroy(bwc_matcher->complex->isolated_tbl); +destroy_first_matcher: + mlx5hws_bwc_matcher_destroy_simple(bwc_matcher); +destroy_ida: + ida_destroy(&bwc_matcher->complex->metadata_ida); + mutex_destroy(&bwc_matcher->complex->hash_lock); + rhashtable_destroy(&bwc_matcher->complex->refcount_hash); +free_complex: + kfree(bwc_matcher->complex); + bwc_matcher->complex = NULL; +free_masks: + hws_bwc_matcher_complex_params_destroy(&mask_1, &mask_2); +err: + return ret; } void mlx5hws_bwc_matcher_destroy_complex(struct mlx5hws_bwc_matcher *bwc_matcher) { - /* nothing to do here */ + struct mlx5hws_bwc_matcher *isolated_bwc_matcher = + bwc_matcher->complex->isolated_bwc_matcher; + + hws_bwc_isolated_actions_destroy(bwc_matcher); + hws_bwc_isolated_matcher_destroy(isolated_bwc_matcher); + hws_bwc_isolated_table_destroy(bwc_matcher->complex->isolated_tbl); + mlx5hws_bwc_matcher_destroy_simple(bwc_matcher); + ida_destroy(&bwc_matcher->complex->metadata_ida); + mutex_destroy(&bwc_matcher->complex->hash_lock); + rhashtable_destroy(&bwc_matcher->complex->refcount_hash); + kfree(bwc_matcher->complex); + bwc_matcher->complex = NULL; +} + +static void +hws_bwc_matcher_complex_hash_lock(struct mlx5hws_bwc_matcher *bwc_matcher) +{ + mutex_lock(&bwc_matcher->complex->hash_lock); +} + +static void +hws_bwc_matcher_complex_hash_unlock(struct mlx5hws_bwc_matcher *bwc_matcher) +{ + mutex_unlock(&bwc_matcher->complex->hash_lock); +} + +static int +hws_bwc_rule_complex_hash_node_get(struct mlx5hws_bwc_rule *bwc_rule, + struct mlx5hws_match_parameters *params) +{ + struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher; + struct mlx5hws_bwc_complex_rule_hash_node *node, *old_node; + struct rhashtable *refcount_hash; + int i; + + bwc_rule->complex_hash_node = NULL; + + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (unlikely(!node)) + return -ENOMEM; + + node->tag = ida_alloc(&bwc_matcher->complex->metadata_ida, GFP_KERNEL); + refcount_set(&node->refcount, 1); + + /* Clear match buffer - turn off all the unrelated fields + * in accordance with the match params mask for the first + * matcher 
out of the two parts of the complex matcher. + * The resulting mask is the key for the hash. + */ + for (i = 0; i < MLX5_ST_SZ_DW_MATCH_PARAM; i++) + node->match_buf[i] = params->match_buf[i] & + bwc_matcher->mt->match_param[i]; + + refcount_hash = &bwc_matcher->complex->refcount_hash; + old_node = rhashtable_lookup_get_insert_fast(refcount_hash, + &node->hash_node, + hws_refcount_hash); + if (old_node) { + /* Rule with the same tag already exists - update refcount */ + refcount_inc(&old_node->refcount); + /* Let the new rule use the same tag as the existing rule. + * Note that we don't have any indication for the rule creation + * process that a rule with similar matching params already + * exists - no harm done when this rule is be overwritten by + * the same STE. + * There's some performance advantage in skipping such cases, + * so this is left for future optimizations. + */ + ida_free(&bwc_matcher->complex->metadata_ida, node->tag); + kfree(node); + node = old_node; + } + + bwc_rule->complex_hash_node = node; + return 0; +} + +static void +hws_bwc_rule_complex_hash_node_put(struct mlx5hws_bwc_rule *bwc_rule, + bool *is_last_rule) +{ + struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher; + struct mlx5hws_bwc_complex_rule_hash_node *node; + + if (is_last_rule) + *is_last_rule = false; + + node = bwc_rule->complex_hash_node; + if (refcount_dec_and_test(&node->refcount)) { + rhashtable_remove_fast(&bwc_matcher->complex->refcount_hash, + &node->hash_node, + hws_refcount_hash); + ida_free(&bwc_matcher->complex->metadata_ida, node->tag); + kfree(node); + if (is_last_rule) + *is_last_rule = true; + } + + bwc_rule->complex_hash_node = NULL; } int mlx5hws_bwc_rule_create_complex(struct mlx5hws_bwc_rule *bwc_rule, @@ -70,19 +1144,271 @@ int mlx5hws_bwc_rule_create_complex(struct mlx5hws_bwc_rule *bwc_rule, struct mlx5hws_rule_action rule_actions[], u16 bwc_queue_idx) { - mlx5hws_err(bwc_rule->bwc_matcher->matcher->tbl->ctx, - "Complex rule is not supported yet\n"); - return -EOPNOTSUPP; + struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher; + struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx; + u8 modify_hdr_action[MLX5_ST_SZ_BYTES(set_action_in)] = {0}; + struct mlx5hws_rule_action rule_actions_1[3] = {0}; + struct mlx5hws_bwc_matcher *isolated_bwc_matcher; + u32 *match_buf_2; + u32 metadata_val; + int ret = 0; + + isolated_bwc_matcher = bwc_matcher->complex->isolated_bwc_matcher; + bwc_rule->isolated_bwc_rule = + mlx5hws_bwc_rule_alloc(isolated_bwc_matcher); + if (unlikely(!bwc_rule->isolated_bwc_rule)) + return -ENOMEM; + + hws_bwc_matcher_complex_hash_lock(bwc_matcher); + + /* Get a new hash node for this complex rule. + * If this is a unique set of match params for the first matcher, + * we will get a new hash node with newly allocated IDA. + * Otherwise we will get an existing node with IDA and updated refcount. + */ + ret = hws_bwc_rule_complex_hash_node_get(bwc_rule, params); + if (unlikely(ret)) { + mlx5hws_err(ctx, "Complex rule: failed getting RHT node for this rule\n"); + goto free_isolated_rule; + } + + /* No need to clear match buffer's fields in accordance to what + * will actually be matched on first and second matchers. + * Both matchers were created with the appropriate masks + * and each of them holds the appropriate field copy array, + * so rule creation will use only the fields that will be copied + * in accordance with setters in field copy array. 
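
The hash-node logic above reduces to: rules whose first-matcher key is identical share one REG_C_6 tag, tracked by a refcount, and the tag is only released when the last such rule goes away. A minimal sketch of that idea, with a fixed-size table standing in for the rhashtable and the IDA (tag_node, tag_get and tag_put are invented names for illustration):

#include <stdbool.h>

#define MAX_NODES 64

struct tag_node {
	unsigned int key;	/* masked first-matcher match params, reduced to one word */
	unsigned int tag;	/* value written into REG_C_6 */
	int refcount;
	bool in_use;
};

static struct tag_node nodes[MAX_NODES];
static unsigned int next_tag = 1;

/* Get a tag for this key; identical keys share one tag and bump its refcount. */
static int tag_get(unsigned int key, unsigned int *tag)
{
	int free_slot = -1;

	for (int i = 0; i < MAX_NODES; i++) {
		if (nodes[i].in_use && nodes[i].key == key) {
			nodes[i].refcount++;
			*tag = nodes[i].tag;
			return 0;
		}
		if (!nodes[i].in_use && free_slot < 0)
			free_slot = i;
	}
	if (free_slot < 0)
		return -1;	/* table full */
	nodes[free_slot] = (struct tag_node){ .key = key, .tag = next_tag++,
					      .refcount = 1, .in_use = true };
	*tag = nodes[free_slot].tag;
	return 0;
}

/* Drop one reference; returns true when the last user of the tag went away. */
static bool tag_put(unsigned int key)
{
	for (int i = 0; i < MAX_NODES; i++) {
		if (nodes[i].in_use && nodes[i].key == key &&
		    --nodes[i].refcount == 0) {
			nodes[i].in_use = false;
			return true;
		}
	}
	return false;
}
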
+ * We do, however, need to temporary allocate match buffer + * for the second (isolated) rule in order to not modify + * user's match params buffer. + */ + + match_buf_2 = kmemdup(params->match_buf, + MLX5_ST_SZ_BYTES(fte_match_param), + GFP_KERNEL); + if (unlikely(!match_buf_2)) { + mlx5hws_err(ctx, "Complex rule: failed allocating match_buf\n"); + ret = -ENOMEM; + goto hash_node_put; + } + + /* On 2nd matcher, use unique 32-bit ID as a matching tag */ + metadata_val = bwc_rule->complex_hash_node->tag; + MLX5_SET(fte_match_param, match_buf_2, + misc_parameters_2.metadata_reg_c_6, metadata_val); + + /* Isolated rule's rule_actions contain all the original actions */ + ret = mlx5hws_bwc_rule_create_simple(bwc_rule->isolated_bwc_rule, + match_buf_2, + rule_actions, + flow_source, + bwc_queue_idx); + kfree(match_buf_2); + if (unlikely(ret)) { + mlx5hws_err(ctx, + "Complex rule: failed creating isolated BWC rule (%d)\n", + ret); + goto hash_node_put; + } + + /* First rule's rule_actions contain setting metadata and + * jump to isolated table that contains the second matcher. + * Set metadata value to a unique value for this rule. + */ + + MLX5_SET(set_action_in, modify_hdr_action, + action_type, MLX5_MODIFICATION_TYPE_SET); + MLX5_SET(set_action_in, modify_hdr_action, + field, MLX5_MODI_META_REG_C_6); + MLX5_SET(set_action_in, modify_hdr_action, + length, 0); /* zero means length of 32 */ + MLX5_SET(set_action_in, modify_hdr_action, + offset, 0); + MLX5_SET(set_action_in, modify_hdr_action, + data, metadata_val); + + rule_actions_1[0].action = bwc_matcher->complex->action_metadata; + rule_actions_1[0].modify_header.offset = 0; + rule_actions_1[0].modify_header.data = modify_hdr_action; + + rule_actions_1[1].action = bwc_matcher->complex->action_go_to_tbl; + rule_actions_1[2].action = bwc_matcher->complex->action_last; + + ret = mlx5hws_bwc_rule_create_simple(bwc_rule, + params->match_buf, + rule_actions_1, + flow_source, + bwc_queue_idx); + + if (unlikely(ret)) { + mlx5hws_err(ctx, + "Complex rule: failed creating first BWC rule (%d)\n", + ret); + goto destroy_isolated_rule; + } + + hws_bwc_matcher_complex_hash_unlock(bwc_matcher); + + return 0; + +destroy_isolated_rule: + mlx5hws_bwc_rule_destroy_simple(bwc_rule->isolated_bwc_rule); +hash_node_put: + hws_bwc_rule_complex_hash_node_put(bwc_rule, NULL); +free_isolated_rule: + hws_bwc_matcher_complex_hash_unlock(bwc_matcher); + mlx5hws_bwc_rule_free(bwc_rule->isolated_bwc_rule); + return ret; } int mlx5hws_bwc_rule_destroy_complex(struct mlx5hws_bwc_rule *bwc_rule) { - return 0; + struct mlx5hws_context *ctx = bwc_rule->bwc_matcher->matcher->tbl->ctx; + struct mlx5hws_bwc_rule *isolated_bwc_rule; + int ret_isolated, ret; + bool is_last_rule; + + hws_bwc_matcher_complex_hash_lock(bwc_rule->bwc_matcher); + + hws_bwc_rule_complex_hash_node_put(bwc_rule, &is_last_rule); + bwc_rule->rule->skip_delete = !is_last_rule; + + ret = mlx5hws_bwc_rule_destroy_simple(bwc_rule); + if (unlikely(ret)) + mlx5hws_err(ctx, "BWC complex rule: failed destroying first rule\n"); + + isolated_bwc_rule = bwc_rule->isolated_bwc_rule; + ret_isolated = mlx5hws_bwc_rule_destroy_simple(isolated_bwc_rule); + if (unlikely(ret_isolated)) + mlx5hws_err(ctx, "BWC complex rule: failed destroying second (isolated) rule\n"); + + hws_bwc_matcher_complex_hash_unlock(bwc_rule->bwc_matcher); + + mlx5hws_bwc_rule_free(isolated_bwc_rule); + + return ret || ret_isolated; +} + +static void +hws_bwc_matcher_clear_hash_rtcs(struct mlx5hws_bwc_matcher *bwc_matcher) +{ + struct 
mlx5hws_bwc_complex_rule_hash_node *node; + struct rhashtable_iter iter; + + rhashtable_walk_enter(&bwc_matcher->complex->refcount_hash, &iter); + rhashtable_walk_start(&iter); + + while ((node = rhashtable_walk_next(&iter)) != NULL) { + if (IS_ERR(node)) + continue; + node->rtc_valid = false; + } + + rhashtable_walk_stop(&iter); + rhashtable_walk_exit(&iter); } -int mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher) +int +mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher) { - mlx5hws_err(bwc_matcher->matcher->tbl->ctx, - "Moving complex rule is not supported yet\n"); - return -EOPNOTSUPP; + struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx; + struct mlx5hws_matcher *matcher = bwc_matcher->matcher; + bool move_error = false, poll_error = false; + u16 bwc_queues = mlx5hws_bwc_queues(ctx); + struct mlx5hws_bwc_rule *tmp_bwc_rule; + struct mlx5hws_rule_attr rule_attr; + struct mlx5hws_table *isolated_tbl; + struct mlx5hws_rule *tmp_rule; + struct list_head *rules_list; + u32 expected_completions = 1; + u32 end_ft_id; + int i, ret; + + /* We are rehashing the matcher that is the first part of the complex + * matcher. Need to update the isolated matcher to point to the end_ft + * of this new matcher. This needs to be done before moving any rules + * to prevent possible steering loops. + */ + isolated_tbl = bwc_matcher->complex->isolated_tbl; + end_ft_id = bwc_matcher->matcher->resize_dst->end_ft_id; + ret = mlx5hws_matcher_update_end_ft_isolated(isolated_tbl, end_ft_id); + if (ret) { + mlx5hws_err(ctx, + "Failed updating end_ft of isolated matcher (%d)\n", + ret); + return ret; + } + + hws_bwc_matcher_clear_hash_rtcs(bwc_matcher); + + mlx5hws_bwc_rule_fill_attr(bwc_matcher, 0, 0, &rule_attr); + + for (i = 0; i < bwc_queues; i++) { + rules_list = &bwc_matcher->rules[i]; + if (list_empty(rules_list)) + continue; + + rule_attr.queue_id = mlx5hws_bwc_get_queue_id(ctx, i); + + list_for_each_entry(tmp_bwc_rule, rules_list, list_node) { + /* Check if a rule with similar tag has already + * been moved. + */ + if (tmp_bwc_rule->complex_hash_node->rtc_valid) { + /* This rule is a duplicate of rule with similar + * tag that has already been moved earlier. + * Just update this rule's RTCs. + */ + tmp_bwc_rule->rule->rtc_0 = + tmp_bwc_rule->complex_hash_node->rtc_0; + tmp_bwc_rule->rule->rtc_1 = + tmp_bwc_rule->complex_hash_node->rtc_1; + tmp_bwc_rule->rule->matcher = + tmp_bwc_rule->rule->matcher->resize_dst; + continue; + } + + /* First time we're moving rule with this tag. + * Move it for real. + */ + tmp_rule = tmp_bwc_rule->rule; + tmp_rule->skip_delete = false; + ret = mlx5hws_matcher_resize_rule_move(matcher, + tmp_rule, + &rule_attr); + if (unlikely(ret && !move_error)) { + mlx5hws_err(ctx, + "Moving complex BWC rule failed (%d), attempting to move rest of the rules\n", + ret); + move_error = true; + } + + expected_completions = 1; + ret = mlx5hws_bwc_queue_poll(ctx, + rule_attr.queue_id, + &expected_completions, + true); + if (unlikely(ret && !poll_error)) { + mlx5hws_err(ctx, + "Moving complex BWC rule: poll failed (%d), attempting to move rest of the rules\n", + ret); + poll_error = true; + } + + /* Done moving the rule to the new matcher, + * now update RTCs for all the duplicated rules. 
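
During the rehash walk above, only the first rule carrying a given tag is physically moved; every duplicate simply inherits the RTC ids recorded by that first move. A compact sketch of that bookkeeping (per_tag_state, move_rule_hw and move_rule are placeholder names, not driver functions):

#include <stdbool.h>

struct per_tag_state {
	bool moved;		/* has a rule with this tag been moved already? */
	unsigned int rtc_0;
	unsigned int rtc_1;
};

/* Stand-in for the real hardware move; yields the rule's RTC ids in the
 * resized matcher.
 */
static void move_rule_hw(unsigned int *rtc_0, unsigned int *rtc_1)
{
	*rtc_0 = 100;
	*rtc_1 = 200;
}

static void move_rule(struct per_tag_state *st,
		      unsigned int *rule_rtc_0, unsigned int *rule_rtc_1)
{
	if (st->moved) {
		/* Duplicate of an already-moved rule: copy its RTC ids only. */
		*rule_rtc_0 = st->rtc_0;
		*rule_rtc_1 = st->rtc_1;
		return;
	}

	move_rule_hw(rule_rtc_0, rule_rtc_1);
	st->rtc_0 = *rule_rtc_0;
	st->rtc_1 = *rule_rtc_1;
	st->moved = true;
}
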
+ */ + tmp_bwc_rule->complex_hash_node->rtc_0 = + tmp_bwc_rule->rule->rtc_0; + tmp_bwc_rule->complex_hash_node->rtc_1 = + tmp_bwc_rule->rule->rtc_1; + + tmp_bwc_rule->complex_hash_node->rtc_valid = true; + } + } + + if (move_error || poll_error) + ret = -EINVAL; + + return ret; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.h index 340f0688e394..a6887c7e39d5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.h @@ -4,6 +4,27 @@ #ifndef HWS_BWC_COMPLEX_H_ #define HWS_BWC_COMPLEX_H_ +struct mlx5hws_bwc_complex_rule_hash_node { + u32 match_buf[MLX5_ST_SZ_DW_MATCH_PARAM]; + u32 tag; + refcount_t refcount; + bool rtc_valid; + u32 rtc_0; + u32 rtc_1; + struct rhash_head hash_node; +}; + +struct mlx5hws_bwc_matcher_complex_data { + struct mlx5hws_table *isolated_tbl; + struct mlx5hws_bwc_matcher *isolated_bwc_matcher; + struct mlx5hws_action *action_metadata; + struct mlx5hws_action *action_go_to_tbl; + struct mlx5hws_action *action_last; + struct rhashtable refcount_hash; + struct mutex hash_lock; /* Protect the refcount rhashtable */ + struct ida metadata_ida; +}; + bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx, u8 match_criteria_enable, struct mlx5hws_match_parameters *mask); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c index e8f98c109b99..9c83753e4592 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c @@ -406,7 +406,6 @@ int mlx5hws_cmd_rtc_create(struct mlx5_core_dev *mdev, MLX5_SET(rtc, attr, match_definer_1, rtc_attr->match_definer_1); MLX5_SET(rtc, attr, stc_id, rtc_attr->stc_base); MLX5_SET(rtc, attr, ste_table_base_id, rtc_attr->ste_base); - MLX5_SET(rtc, attr, ste_table_offset, rtc_attr->ste_offset); MLX5_SET(rtc, attr, miss_flow_table_id, rtc_attr->miss_ft_id); MLX5_SET(rtc, attr, reparse_mode, rtc_attr->reparse_mode); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h index 51d9e0291ac1..fa6bff210266 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h @@ -70,7 +70,6 @@ struct mlx5hws_cmd_rtc_create_attr { u32 pd; u32 stc_base; u32 ste_base; - u32 ste_offset; u32 miss_ft_id; bool fw_gen_wqe; u8 update_index_mode; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c index 9cda2774fd64..428dae869706 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c @@ -34,7 +34,6 @@ static int hws_context_pools_init(struct mlx5hws_context *ctx) /* Create an STC pool per FT type */ pool_attr.pool_type = MLX5HWS_POOL_TYPE_STC; - pool_attr.flags = MLX5HWS_POOL_FLAGS_FOR_STC_POOL; max_log_sz = min(MLX5HWS_POOL_STC_LOG_SZ, ctx->caps->stc_alloc_log_max); pool_attr.alloc_log_sz = max(max_log_sz, ctx->caps->stc_alloc_log_gran); @@ -159,10 +158,16 @@ static int hws_context_init_hws(struct mlx5hws_context *ctx, if (ret) goto pools_uninit; + ret = mlx5hws_action_ste_pool_init(ctx); + if (ret) + goto close_queues; + INIT_LIST_HEAD(&ctx->tbl_list); return 0; +close_queues: + 
mlx5hws_send_queues_close(ctx); pools_uninit: hws_context_pools_uninit(ctx); uninit_pd: @@ -175,6 +180,7 @@ static void hws_context_uninit_hws(struct mlx5hws_context *ctx) if (!(ctx->flags & MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT)) return; + mlx5hws_action_ste_pool_uninit(ctx); mlx5hws_send_queues_close(ctx); hws_context_pools_uninit(ctx); hws_context_uninit_pd(ctx); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h index 38c3647444ad..3f8938c73dc0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h @@ -39,6 +39,8 @@ struct mlx5hws_context { struct mlx5hws_cmd_query_caps *caps; u32 pd_num; struct mlx5hws_pool *stc_pool; + struct mlx5hws_action_ste_pool *action_ste_pool; /* One per queue */ + struct delayed_work action_ste_cleanup; struct mlx5hws_context_common_res common_res; struct mlx5hws_pattern_cache *pattern_cache; struct mlx5hws_definer_cache *definer_cache; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c index 696275fd0ce2..91568d6c1dac 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c @@ -118,7 +118,6 @@ static int hws_debug_dump_matcher(struct seq_file *f, struct mlx5hws_matcher *ma { enum mlx5hws_table_type tbl_type = matcher->tbl->type; struct mlx5hws_cmd_ft_query_attr ft_attr = {0}; - struct mlx5hws_pool_chunk *ste; struct mlx5hws_pool *ste_pool; u64 icm_addr_0 = 0; u64 icm_addr_1 = 0; @@ -134,12 +133,11 @@ static int hws_debug_dump_matcher(struct seq_file *f, struct mlx5hws_matcher *ma matcher->end_ft_id, matcher->col_matcher ? 
HWS_PTR_TO_ID(matcher->col_matcher) : 0); - ste = &matcher->match_ste.ste; ste_pool = matcher->match_ste.pool; if (ste_pool) { - ste_0_id = mlx5hws_pool_chunk_get_base_id(ste_pool, ste); + ste_0_id = mlx5hws_pool_get_base_id(ste_pool); if (tbl_type == MLX5HWS_TABLE_TYPE_FDB) - ste_1_id = mlx5hws_pool_chunk_get_base_mirror_id(ste_pool, ste); + ste_1_id = mlx5hws_pool_get_base_mirror_id(ste_pool); } seq_printf(f, ",%d,%d,%d,%d", @@ -148,19 +146,6 @@ static int hws_debug_dump_matcher(struct seq_file *f, struct mlx5hws_matcher *ma matcher->match_ste.rtc_1_id, (int)ste_1_id); - ste = &matcher->action_ste.ste; - ste_pool = matcher->action_ste.pool; - if (ste_pool) { - ste_0_id = mlx5hws_pool_chunk_get_base_id(ste_pool, ste); - if (tbl_type == MLX5HWS_TABLE_TYPE_FDB) - ste_1_id = mlx5hws_pool_chunk_get_base_mirror_id(ste_pool, ste); - else - ste_1_id = -1; - } else { - ste_0_id = -1; - ste_1_id = -1; - } - ft_attr.type = matcher->tbl->fw_ft_type; ret = mlx5hws_cmd_flow_table_query(matcher->tbl->ctx->mdev, matcher->end_ft_id, @@ -170,10 +155,7 @@ static int hws_debug_dump_matcher(struct seq_file *f, struct mlx5hws_matcher *ma if (ret) return ret; - seq_printf(f, ",%d,%d,%d,%d,%d,0x%llx,0x%llx\n", - matcher->action_ste.rtc_0_id, (int)ste_0_id, - matcher->action_ste.rtc_1_id, (int)ste_1_id, - 0, + seq_printf(f, ",-1,-1,-1,-1,0,0x%llx,0x%llx\n", mlx5hws_debug_icm_to_idx(icm_addr_0), mlx5hws_debug_icm_to_idx(icm_addr_1)); @@ -387,14 +369,17 @@ static int hws_debug_dump_context_stc(struct seq_file *f, struct mlx5hws_context if (!stc_pool) return 0; - if (stc_pool->resource[0]) { - ret = hws_debug_dump_context_stc_resource(f, ctx, stc_pool->resource[0]); + if (stc_pool->resource) { + ret = hws_debug_dump_context_stc_resource(f, ctx, + stc_pool->resource); if (ret) return ret; } - if (stc_pool->mirror_resource[0]) { - ret = hws_debug_dump_context_stc_resource(f, ctx, stc_pool->mirror_resource[0]); + if (stc_pool->mirror_resource) { + struct mlx5hws_pool_resource *res = stc_pool->mirror_resource; + + ret = hws_debug_dump_context_stc_resource(f, ctx, res); if (ret) return ret; } @@ -402,10 +387,41 @@ static int hws_debug_dump_context_stc(struct seq_file *f, struct mlx5hws_context return 0; } +static void +hws_debug_dump_action_ste_table(struct seq_file *f, + struct mlx5hws_action_ste_table *action_tbl) +{ + int ste_0_id = mlx5hws_pool_get_base_id(action_tbl->pool); + int ste_1_id = mlx5hws_pool_get_base_mirror_id(action_tbl->pool); + + seq_printf(f, "%d,0x%llx,%d,%d,%d,%d\n", + MLX5HWS_DEBUG_RES_TYPE_ACTION_STE_TABLE, + HWS_PTR_TO_ID(action_tbl), + action_tbl->rtc_0_id, ste_0_id, + action_tbl->rtc_1_id, ste_1_id); +} + +static void hws_debug_dump_action_ste_pool(struct seq_file *f, + struct mlx5hws_action_ste_pool *pool) +{ + struct mlx5hws_action_ste_table *action_tbl; + enum mlx5hws_pool_optimize opt; + + mutex_lock(&pool->lock); + for (opt = MLX5HWS_POOL_OPTIMIZE_NONE; opt < MLX5HWS_POOL_OPTIMIZE_MAX; + opt++) { + list_for_each_entry(action_tbl, &pool->elems[opt].available, + list_node) { + hws_debug_dump_action_ste_table(f, action_tbl); + } + } + mutex_unlock(&pool->lock); +} + static int hws_debug_dump_context(struct seq_file *f, struct mlx5hws_context *ctx) { struct mlx5hws_table *tbl; - int ret; + int ret, i; ret = hws_debug_dump_context_info(f, ctx); if (ret) @@ -425,6 +441,9 @@ static int hws_debug_dump_context(struct seq_file *f, struct mlx5hws_context *ct return ret; } + for (i = 0; i < ctx->queues; i++) + hws_debug_dump_action_ste_pool(f, &ctx->action_ste_pool[i]); + return 0; } diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.h index e44e7ae28f93..89c396f9f266 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.h @@ -26,6 +26,8 @@ enum mlx5hws_debug_res_type { MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_HASH_DEFINER = 4205, MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_RANGE_DEFINER = 4206, MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_COMPARE_MATCH_DEFINER = 4207, + + MLX5HWS_DEBUG_RES_TYPE_ACTION_STE_TABLE = 4300, }; static inline u64 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c index c8cc0c8115f5..5cc0dc002ac1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c @@ -158,6 +158,218 @@ struct mlx5hws_definer_conv_data { u32 match_flags; }; +#define HWS_DEFINER_ENTRY(name)[MLX5HWS_DEFINER_FNAME_##name] = #name + +static const char * const hws_definer_fname_to_str[] = { + HWS_DEFINER_ENTRY(ETH_SMAC_47_16_O), + HWS_DEFINER_ENTRY(ETH_SMAC_47_16_I), + HWS_DEFINER_ENTRY(ETH_SMAC_15_0_O), + HWS_DEFINER_ENTRY(ETH_SMAC_15_0_I), + HWS_DEFINER_ENTRY(ETH_DMAC_47_16_O), + HWS_DEFINER_ENTRY(ETH_DMAC_47_16_I), + HWS_DEFINER_ENTRY(ETH_DMAC_15_0_O), + HWS_DEFINER_ENTRY(ETH_DMAC_15_0_I), + HWS_DEFINER_ENTRY(ETH_TYPE_O), + HWS_DEFINER_ENTRY(ETH_TYPE_I), + HWS_DEFINER_ENTRY(ETH_L3_TYPE_O), + HWS_DEFINER_ENTRY(ETH_L3_TYPE_I), + HWS_DEFINER_ENTRY(VLAN_TYPE_O), + HWS_DEFINER_ENTRY(VLAN_TYPE_I), + HWS_DEFINER_ENTRY(VLAN_FIRST_PRIO_O), + HWS_DEFINER_ENTRY(VLAN_FIRST_PRIO_I), + HWS_DEFINER_ENTRY(VLAN_CFI_O), + HWS_DEFINER_ENTRY(VLAN_CFI_I), + HWS_DEFINER_ENTRY(VLAN_ID_O), + HWS_DEFINER_ENTRY(VLAN_ID_I), + HWS_DEFINER_ENTRY(VLAN_SECOND_TYPE_O), + HWS_DEFINER_ENTRY(VLAN_SECOND_TYPE_I), + HWS_DEFINER_ENTRY(VLAN_SECOND_PRIO_O), + HWS_DEFINER_ENTRY(VLAN_SECOND_PRIO_I), + HWS_DEFINER_ENTRY(VLAN_SECOND_CFI_O), + HWS_DEFINER_ENTRY(VLAN_SECOND_CFI_I), + HWS_DEFINER_ENTRY(VLAN_SECOND_ID_O), + HWS_DEFINER_ENTRY(VLAN_SECOND_ID_I), + HWS_DEFINER_ENTRY(IPV4_IHL_O), + HWS_DEFINER_ENTRY(IPV4_IHL_I), + HWS_DEFINER_ENTRY(IP_DSCP_O), + HWS_DEFINER_ENTRY(IP_DSCP_I), + HWS_DEFINER_ENTRY(IP_ECN_O), + HWS_DEFINER_ENTRY(IP_ECN_I), + HWS_DEFINER_ENTRY(IP_TTL_O), + HWS_DEFINER_ENTRY(IP_TTL_I), + HWS_DEFINER_ENTRY(IPV4_DST_O), + HWS_DEFINER_ENTRY(IPV4_DST_I), + HWS_DEFINER_ENTRY(IPV4_SRC_O), + HWS_DEFINER_ENTRY(IPV4_SRC_I), + HWS_DEFINER_ENTRY(IP_VERSION_O), + HWS_DEFINER_ENTRY(IP_VERSION_I), + HWS_DEFINER_ENTRY(IP_FRAG_O), + HWS_DEFINER_ENTRY(IP_FRAG_I), + HWS_DEFINER_ENTRY(IP_LEN_O), + HWS_DEFINER_ENTRY(IP_LEN_I), + HWS_DEFINER_ENTRY(IP_TOS_O), + HWS_DEFINER_ENTRY(IP_TOS_I), + HWS_DEFINER_ENTRY(IPV6_FLOW_LABEL_O), + HWS_DEFINER_ENTRY(IPV6_FLOW_LABEL_I), + HWS_DEFINER_ENTRY(IPV6_DST_127_96_O), + HWS_DEFINER_ENTRY(IPV6_DST_95_64_O), + HWS_DEFINER_ENTRY(IPV6_DST_63_32_O), + HWS_DEFINER_ENTRY(IPV6_DST_31_0_O), + HWS_DEFINER_ENTRY(IPV6_DST_127_96_I), + HWS_DEFINER_ENTRY(IPV6_DST_95_64_I), + HWS_DEFINER_ENTRY(IPV6_DST_63_32_I), + HWS_DEFINER_ENTRY(IPV6_DST_31_0_I), + HWS_DEFINER_ENTRY(IPV6_SRC_127_96_O), + HWS_DEFINER_ENTRY(IPV6_SRC_95_64_O), + HWS_DEFINER_ENTRY(IPV6_SRC_63_32_O), + HWS_DEFINER_ENTRY(IPV6_SRC_31_0_O), + HWS_DEFINER_ENTRY(IPV6_SRC_127_96_I), + HWS_DEFINER_ENTRY(IPV6_SRC_95_64_I), + HWS_DEFINER_ENTRY(IPV6_SRC_63_32_I), + HWS_DEFINER_ENTRY(IPV6_SRC_31_0_I), + 
HWS_DEFINER_ENTRY(IP_PROTOCOL_O), + HWS_DEFINER_ENTRY(IP_PROTOCOL_I), + HWS_DEFINER_ENTRY(L4_SPORT_O), + HWS_DEFINER_ENTRY(L4_SPORT_I), + HWS_DEFINER_ENTRY(L4_DPORT_O), + HWS_DEFINER_ENTRY(L4_DPORT_I), + HWS_DEFINER_ENTRY(TCP_FLAGS_I), + HWS_DEFINER_ENTRY(TCP_FLAGS_O), + HWS_DEFINER_ENTRY(TCP_SEQ_NUM), + HWS_DEFINER_ENTRY(TCP_ACK_NUM), + HWS_DEFINER_ENTRY(GTP_TEID), + HWS_DEFINER_ENTRY(GTP_MSG_TYPE), + HWS_DEFINER_ENTRY(GTP_EXT_FLAG), + HWS_DEFINER_ENTRY(GTP_NEXT_EXT_HDR), + HWS_DEFINER_ENTRY(GTP_EXT_HDR_PDU), + HWS_DEFINER_ENTRY(GTP_EXT_HDR_QFI), + HWS_DEFINER_ENTRY(GTPU_DW0), + HWS_DEFINER_ENTRY(GTPU_FIRST_EXT_DW0), + HWS_DEFINER_ENTRY(GTPU_DW2), + HWS_DEFINER_ENTRY(FLEX_PARSER_0), + HWS_DEFINER_ENTRY(FLEX_PARSER_1), + HWS_DEFINER_ENTRY(FLEX_PARSER_2), + HWS_DEFINER_ENTRY(FLEX_PARSER_3), + HWS_DEFINER_ENTRY(FLEX_PARSER_4), + HWS_DEFINER_ENTRY(FLEX_PARSER_5), + HWS_DEFINER_ENTRY(FLEX_PARSER_6), + HWS_DEFINER_ENTRY(FLEX_PARSER_7), + HWS_DEFINER_ENTRY(VPORT_REG_C_0), + HWS_DEFINER_ENTRY(VXLAN_FLAGS), + HWS_DEFINER_ENTRY(VXLAN_VNI), + HWS_DEFINER_ENTRY(VXLAN_GPE_FLAGS), + HWS_DEFINER_ENTRY(VXLAN_GPE_RSVD0), + HWS_DEFINER_ENTRY(VXLAN_GPE_PROTO), + HWS_DEFINER_ENTRY(VXLAN_GPE_VNI), + HWS_DEFINER_ENTRY(VXLAN_GPE_RSVD1), + HWS_DEFINER_ENTRY(GENEVE_OPT_LEN), + HWS_DEFINER_ENTRY(GENEVE_OAM), + HWS_DEFINER_ENTRY(GENEVE_PROTO), + HWS_DEFINER_ENTRY(GENEVE_VNI), + HWS_DEFINER_ENTRY(SOURCE_QP), + HWS_DEFINER_ENTRY(SOURCE_GVMI), + HWS_DEFINER_ENTRY(REG_0), + HWS_DEFINER_ENTRY(REG_1), + HWS_DEFINER_ENTRY(REG_2), + HWS_DEFINER_ENTRY(REG_3), + HWS_DEFINER_ENTRY(REG_4), + HWS_DEFINER_ENTRY(REG_5), + HWS_DEFINER_ENTRY(REG_6), + HWS_DEFINER_ENTRY(REG_7), + HWS_DEFINER_ENTRY(REG_8), + HWS_DEFINER_ENTRY(REG_9), + HWS_DEFINER_ENTRY(REG_10), + HWS_DEFINER_ENTRY(REG_11), + HWS_DEFINER_ENTRY(REG_A), + HWS_DEFINER_ENTRY(REG_B), + HWS_DEFINER_ENTRY(GRE_KEY_PRESENT), + HWS_DEFINER_ENTRY(GRE_C), + HWS_DEFINER_ENTRY(GRE_K), + HWS_DEFINER_ENTRY(GRE_S), + HWS_DEFINER_ENTRY(GRE_PROTOCOL), + HWS_DEFINER_ENTRY(GRE_OPT_KEY), + HWS_DEFINER_ENTRY(GRE_OPT_SEQ), + HWS_DEFINER_ENTRY(GRE_OPT_CHECKSUM), + HWS_DEFINER_ENTRY(INTEGRITY_O), + HWS_DEFINER_ENTRY(INTEGRITY_I), + HWS_DEFINER_ENTRY(ICMP_DW1), + HWS_DEFINER_ENTRY(ICMP_DW2), + HWS_DEFINER_ENTRY(ICMP_DW3), + HWS_DEFINER_ENTRY(IPSEC_SPI), + HWS_DEFINER_ENTRY(IPSEC_SEQUENCE_NUMBER), + HWS_DEFINER_ENTRY(IPSEC_SYNDROME), + HWS_DEFINER_ENTRY(MPLS0_O), + HWS_DEFINER_ENTRY(MPLS1_O), + HWS_DEFINER_ENTRY(MPLS2_O), + HWS_DEFINER_ENTRY(MPLS3_O), + HWS_DEFINER_ENTRY(MPLS4_O), + HWS_DEFINER_ENTRY(MPLS0_I), + HWS_DEFINER_ENTRY(MPLS1_I), + HWS_DEFINER_ENTRY(MPLS2_I), + HWS_DEFINER_ENTRY(MPLS3_I), + HWS_DEFINER_ENTRY(MPLS4_I), + HWS_DEFINER_ENTRY(FLEX_PARSER0_OK), + HWS_DEFINER_ENTRY(FLEX_PARSER1_OK), + HWS_DEFINER_ENTRY(FLEX_PARSER2_OK), + HWS_DEFINER_ENTRY(FLEX_PARSER3_OK), + HWS_DEFINER_ENTRY(FLEX_PARSER4_OK), + HWS_DEFINER_ENTRY(FLEX_PARSER5_OK), + HWS_DEFINER_ENTRY(FLEX_PARSER6_OK), + HWS_DEFINER_ENTRY(FLEX_PARSER7_OK), + HWS_DEFINER_ENTRY(OKS2_MPLS0_O), + HWS_DEFINER_ENTRY(OKS2_MPLS1_O), + HWS_DEFINER_ENTRY(OKS2_MPLS2_O), + HWS_DEFINER_ENTRY(OKS2_MPLS3_O), + HWS_DEFINER_ENTRY(OKS2_MPLS4_O), + HWS_DEFINER_ENTRY(OKS2_MPLS0_I), + HWS_DEFINER_ENTRY(OKS2_MPLS1_I), + HWS_DEFINER_ENTRY(OKS2_MPLS2_I), + HWS_DEFINER_ENTRY(OKS2_MPLS3_I), + HWS_DEFINER_ENTRY(OKS2_MPLS4_I), + HWS_DEFINER_ENTRY(GENEVE_OPT_OK_0), + HWS_DEFINER_ENTRY(GENEVE_OPT_OK_1), + HWS_DEFINER_ENTRY(GENEVE_OPT_OK_2), + HWS_DEFINER_ENTRY(GENEVE_OPT_OK_3), + HWS_DEFINER_ENTRY(GENEVE_OPT_OK_4), + HWS_DEFINER_ENTRY(GENEVE_OPT_OK_5), + 
HWS_DEFINER_ENTRY(GENEVE_OPT_OK_6), + HWS_DEFINER_ENTRY(GENEVE_OPT_OK_7), + HWS_DEFINER_ENTRY(GENEVE_OPT_DW_0), + HWS_DEFINER_ENTRY(GENEVE_OPT_DW_1), + HWS_DEFINER_ENTRY(GENEVE_OPT_DW_2), + HWS_DEFINER_ENTRY(GENEVE_OPT_DW_3), + HWS_DEFINER_ENTRY(GENEVE_OPT_DW_4), + HWS_DEFINER_ENTRY(GENEVE_OPT_DW_5), + HWS_DEFINER_ENTRY(GENEVE_OPT_DW_6), + HWS_DEFINER_ENTRY(GENEVE_OPT_DW_7), + HWS_DEFINER_ENTRY(IB_L4_OPCODE), + HWS_DEFINER_ENTRY(IB_L4_QPN), + HWS_DEFINER_ENTRY(IB_L4_A), + HWS_DEFINER_ENTRY(RANDOM_NUM), + HWS_DEFINER_ENTRY(PTYPE_L2_O), + HWS_DEFINER_ENTRY(PTYPE_L2_I), + HWS_DEFINER_ENTRY(PTYPE_L3_O), + HWS_DEFINER_ENTRY(PTYPE_L3_I), + HWS_DEFINER_ENTRY(PTYPE_L4_O), + HWS_DEFINER_ENTRY(PTYPE_L4_I), + HWS_DEFINER_ENTRY(PTYPE_L4_EXT_O), + HWS_DEFINER_ENTRY(PTYPE_L4_EXT_I), + HWS_DEFINER_ENTRY(PTYPE_FRAG_O), + HWS_DEFINER_ENTRY(PTYPE_FRAG_I), + HWS_DEFINER_ENTRY(TNL_HDR_0), + HWS_DEFINER_ENTRY(TNL_HDR_1), + HWS_DEFINER_ENTRY(TNL_HDR_2), + HWS_DEFINER_ENTRY(TNL_HDR_3), + [MLX5HWS_DEFINER_FNAME_MAX] = "DEFINER_FNAME_UNKNOWN", +}; + +const char *mlx5hws_definer_fname_to_str(enum mlx5hws_definer_fname fname) +{ + if (fname > MLX5HWS_DEFINER_FNAME_MAX) + fname = MLX5HWS_DEFINER_FNAME_MAX; + return hws_definer_fname_to_str[fname]; +} + static void hws_definer_ones_set(struct mlx5hws_definer_fc *fc, void *match_param, @@ -509,7 +721,7 @@ static int hws_definer_conv_outer(struct mlx5hws_definer_conv_data *cd, u32 *match_param) { - bool is_s_ipv6, is_d_ipv6, smac_set, dmac_set; + bool is_ipv6, smac_set, dmac_set, ip_addr_set, ip_ver_set; struct mlx5hws_definer_fc *fc = cd->fc; struct mlx5hws_definer_fc *curr_fc; u32 *s_ipv6, *d_ipv6; @@ -521,6 +733,20 @@ hws_definer_conv_outer(struct mlx5hws_definer_conv_data *cd, return -EINVAL; } + ip_addr_set = HWS_IS_FLD_SET_SZ(match_param, + outer_headers.src_ipv4_src_ipv6, + 0x80) || + HWS_IS_FLD_SET_SZ(match_param, + outer_headers.dst_ipv4_dst_ipv6, 0x80); + ip_ver_set = HWS_IS_FLD_SET(match_param, outer_headers.ip_version) || + HWS_IS_FLD_SET(match_param, outer_headers.ethertype); + + if (ip_addr_set && !ip_ver_set) { + mlx5hws_err(cd->ctx, + "Unsupported match on IP address without version or ethertype\n"); + return -EINVAL; + } + /* L2 Check ethertype */ HWS_SET_HDR(fc, match_param, ETH_TYPE_O, outer_headers.ethertype, @@ -570,10 +796,16 @@ hws_definer_conv_outer(struct mlx5hws_definer_conv_data *cd, outer_headers.dst_ipv4_dst_ipv6.ipv6_layout); /* Assume IPv6 is used if ipv6 bits are set */ - is_s_ipv6 = s_ipv6[0] || s_ipv6[1] || s_ipv6[2]; - is_d_ipv6 = d_ipv6[0] || d_ipv6[1] || d_ipv6[2]; + is_ipv6 = s_ipv6[0] || s_ipv6[1] || s_ipv6[2] || + d_ipv6[0] || d_ipv6[1] || d_ipv6[2]; + + /* IHL is an IPv4-specific field. 
*/ + if (is_ipv6 && HWS_IS_FLD_SET(match_param, outer_headers.ipv4_ihl)) { + mlx5hws_err(cd->ctx, "Unsupported match on IPv6 address and IPv4 IHL\n"); + return -EINVAL; + } - if (is_s_ipv6) { + if (is_ipv6) { /* Handle IPv6 source address */ HWS_SET_HDR(fc, match_param, IPV6_SRC_127_96_O, outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_127_96, @@ -587,13 +819,6 @@ hws_definer_conv_outer(struct mlx5hws_definer_conv_data *cd, HWS_SET_HDR(fc, match_param, IPV6_SRC_31_0_O, outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0, ipv6_src_outer.ipv6_address_31_0); - } else { - /* Handle IPv4 source address */ - HWS_SET_HDR(fc, match_param, IPV4_SRC_O, - outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0, - ipv4_src_dest_outer.source_address); - } - if (is_d_ipv6) { /* Handle IPv6 destination address */ HWS_SET_HDR(fc, match_param, IPV6_DST_127_96_O, outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_127_96, @@ -608,6 +833,10 @@ hws_definer_conv_outer(struct mlx5hws_definer_conv_data *cd, outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0, ipv6_dst_outer.ipv6_address_31_0); } else { + /* Handle IPv4 source address */ + HWS_SET_HDR(fc, match_param, IPV4_SRC_O, + outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0, + ipv4_src_dest_outer.source_address); /* Handle IPv4 destination address */ HWS_SET_HDR(fc, match_param, IPV4_DST_O, outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0, @@ -665,7 +894,7 @@ static int hws_definer_conv_inner(struct mlx5hws_definer_conv_data *cd, u32 *match_param) { - bool is_s_ipv6, is_d_ipv6, smac_set, dmac_set; + bool is_ipv6, smac_set, dmac_set, ip_addr_set, ip_ver_set; struct mlx5hws_definer_fc *fc = cd->fc; struct mlx5hws_definer_fc *curr_fc; u32 *s_ipv6, *d_ipv6; @@ -677,6 +906,20 @@ hws_definer_conv_inner(struct mlx5hws_definer_conv_data *cd, return -EINVAL; } + ip_addr_set = HWS_IS_FLD_SET_SZ(match_param, + inner_headers.src_ipv4_src_ipv6, + 0x80) || + HWS_IS_FLD_SET_SZ(match_param, + inner_headers.dst_ipv4_dst_ipv6, 0x80); + ip_ver_set = HWS_IS_FLD_SET(match_param, inner_headers.ip_version) || + HWS_IS_FLD_SET(match_param, inner_headers.ethertype); + + if (ip_addr_set && !ip_ver_set) { + mlx5hws_err(cd->ctx, + "Unsupported match on IP address without version or ethertype\n"); + return -EINVAL; + } + /* L2 Check ethertype */ HWS_SET_HDR(fc, match_param, ETH_TYPE_I, inner_headers.ethertype, @@ -728,10 +971,16 @@ hws_definer_conv_inner(struct mlx5hws_definer_conv_data *cd, inner_headers.dst_ipv4_dst_ipv6.ipv6_layout); /* Assume IPv6 is used if ipv6 bits are set */ - is_s_ipv6 = s_ipv6[0] || s_ipv6[1] || s_ipv6[2]; - is_d_ipv6 = d_ipv6[0] || d_ipv6[1] || d_ipv6[2]; + is_ipv6 = s_ipv6[0] || s_ipv6[1] || s_ipv6[2] || + d_ipv6[0] || d_ipv6[1] || d_ipv6[2]; - if (is_s_ipv6) { + /* IHL is an IPv4-specific field. 
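
Both the outer and the inner conversion decide IPv4 versus IPv6 the same way: if any of the upper 96 bits of either address mask is set, the rule must be matching an IPv6 address, because an IPv4 mask only ever occupies the lowest 32-bit word of src_ipv4_src_ipv6/dst_ipv4_dst_ipv6. A one-function sketch of that test (mask_is_ipv6 is an illustrative name):

#include <stdbool.h>
#include <stdint.h>

/* Each address mask is four 32-bit words, most significant first.
 * If any of the top three words of either address is non-zero, the
 * match can only be an IPv6 one.
 */
static bool mask_is_ipv6(const uint32_t src[4], const uint32_t dst[4])
{
	return src[0] || src[1] || src[2] || dst[0] || dst[1] || dst[2];
}
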
*/ + if (is_ipv6 && HWS_IS_FLD_SET(match_param, inner_headers.ipv4_ihl)) { + mlx5hws_err(cd->ctx, "Unsupported match on IPv6 address and IPv4 IHL\n"); + return -EINVAL; + } + + if (is_ipv6) { /* Handle IPv6 source address */ HWS_SET_HDR(fc, match_param, IPV6_SRC_127_96_I, inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_127_96, @@ -745,13 +994,6 @@ hws_definer_conv_inner(struct mlx5hws_definer_conv_data *cd, HWS_SET_HDR(fc, match_param, IPV6_SRC_31_0_I, inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0, ipv6_src_inner.ipv6_address_31_0); - } else { - /* Handle IPv4 source address */ - HWS_SET_HDR(fc, match_param, IPV4_SRC_I, - inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0, - ipv4_src_dest_inner.source_address); - } - if (is_d_ipv6) { /* Handle IPv6 destination address */ HWS_SET_HDR(fc, match_param, IPV6_DST_127_96_I, inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_127_96, @@ -766,6 +1008,10 @@ hws_definer_conv_inner(struct mlx5hws_definer_conv_data *cd, inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0, ipv6_dst_inner.ipv6_address_31_0); } else { + /* Handle IPv4 source address */ + HWS_SET_HDR(fc, match_param, IPV4_SRC_I, + inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0, + ipv4_src_dest_inner.source_address); /* Handle IPv4 destination address */ HWS_SET_HDR(fc, match_param, IPV4_DST_I, inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h index 5c1a2086efba..62da55389331 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h @@ -831,4 +831,6 @@ mlx5hws_definer_conv_match_params_to_compressed_fc(struct mlx5hws_context *ctx, u32 *match_param, int *fc_sz); +const char *mlx5hws_definer_fname_to_str(enum mlx5hws_definer_fname fname); + #endif /* HWS_DEFINER_H_ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c index 1b787cd66e6f..9d1c0e4b224a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c @@ -1081,13 +1081,8 @@ static int mlx5_cmd_hws_create_fte(struct mlx5_flow_root_namespace *ns, struct mlx5hws_bwc_rule *rule; int err = 0; - if (mlx5_fs_cmd_is_fw_term_table(ft)) { - /* Packet reformat on terminamtion table not supported yet */ - if (fte->act_dests.action.action & - MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) - return -EOPNOTSUPP; + if (mlx5_fs_cmd_is_fw_term_table(ft)) return mlx5_fs_cmd_get_fw_cmds()->create_fte(ns, ft, group, fte); - } err = mlx5_fs_fte_get_hws_actions(ns, ft, group, fte, &ractions); if (err) @@ -1362,7 +1357,7 @@ mlx5_cmd_hws_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns, pkt_reformat->fs_hws_action.pr_data = pr_data; } - pkt_reformat->owner = MLX5_FLOW_RESOURCE_OWNER_SW; + pkt_reformat->owner = MLX5_FLOW_RESOURCE_OWNER_HWS; pkt_reformat->fs_hws_action.hws_action = hws_action; return 0; @@ -1380,6 +1375,15 @@ static void mlx5_cmd_hws_packet_reformat_dealloc(struct mlx5_flow_root_namespace struct mlx5_fs_hws_pr *pr_data; struct mlx5_fs_pool *pr_pool; + if (pkt_reformat->fs_hws_action.fw_reformat_id != 0) { + struct mlx5_pkt_reformat fw_pkt_reformat = { 0 }; + + fw_pkt_reformat.id = pkt_reformat->fs_hws_action.fw_reformat_id; + mlx5_fs_cmd_get_fw_cmds()-> + 
packet_reformat_dealloc(ns, &fw_pkt_reformat); + pkt_reformat->fs_hws_action.fw_reformat_id = 0; + } + if (pkt_reformat->reformat_type == MLX5_REFORMAT_TYPE_REMOVE_HDR) return; @@ -1499,6 +1503,7 @@ static int mlx5_cmd_hws_modify_header_alloc(struct mlx5_flow_root_namespace *ns, err = -ENOMEM; goto release_mh; } + mutex_init(&modify_hdr->fs_hws_action.lock); modify_hdr->fs_hws_action.mh_data = mh_data; modify_hdr->fs_hws_action.fs_pool = pool; modify_hdr->owner = MLX5_FLOW_RESOURCE_OWNER_SW; @@ -1532,6 +1537,58 @@ static void mlx5_cmd_hws_modify_header_dealloc(struct mlx5_flow_root_namespace * modify_hdr->fs_hws_action.mh_data = NULL; } +int +mlx5_fs_hws_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat, + u32 *reformat_id) +{ + enum mlx5_flow_namespace_type ns_type = pkt_reformat->ns_type; + struct mutex *lock = &pkt_reformat->fs_hws_action.lock; + u32 *id = &pkt_reformat->fs_hws_action.fw_reformat_id; + struct mlx5_pkt_reformat fw_pkt_reformat = { 0 }; + struct mlx5_pkt_reformat_params params = { 0 }; + struct mlx5_flow_root_namespace *ns; + struct mlx5_core_dev *dev; + int ret; + + mutex_lock(lock); + + if (*id != 0) { + *reformat_id = *id; + ret = 0; + goto unlock; + } + + dev = mlx5hws_action_get_dev(pkt_reformat->fs_hws_action.hws_action); + if (!dev) { + ret = -EINVAL; + goto unlock; + } + + ns = mlx5_get_root_namespace(dev, ns_type); + if (!ns) { + ret = -EINVAL; + goto unlock; + } + + params.type = pkt_reformat->reformat_type; + params.size = pkt_reformat->fs_hws_action.pr_data->data_size; + params.data = pkt_reformat->fs_hws_action.pr_data->data; + + ret = mlx5_fs_cmd_get_fw_cmds()-> + packet_reformat_alloc(ns, ¶ms, ns_type, &fw_pkt_reformat); + if (ret) + goto unlock; + + *id = fw_pkt_reformat.id; + *reformat_id = *id; + ret = 0; + +unlock: + mutex_unlock(lock); + + return ret; +} + static int mlx5_cmd_hws_create_match_definer(struct mlx5_flow_root_namespace *ns, u16 format_id, u32 *match_mask) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h index 8b56298288da..b92d55b2d147 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h @@ -41,6 +41,11 @@ struct mlx5_fs_hws_action { struct mlx5_fs_pool *fs_pool; struct mlx5_fs_hws_pr *pr_data; struct mlx5_fs_hws_mh *mh_data; + u32 fw_reformat_id; + /* Protect `fw_reformat_id` against being initialized from multiple + * threads. 
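
The fw_reformat_id handling in mlx5_fs_hws_action_get_pkt_reformat_id() is a lazy, lock-protected one-time allocation: the first caller creates the firmware object, every later caller reuses the cached id. A user-space sketch of the same pattern using pthreads (alloc_fw_object and get_fw_id are placeholder names under that assumption):

#include <pthread.h>

static pthread_mutex_t id_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int fw_id;	/* 0 means "not created yet" */

/* Stand-in for the firmware allocation call. */
static unsigned int alloc_fw_object(void)
{
	return 42;
}

/* Create the FW-side object at most once, no matter how many threads ask. */
static unsigned int get_fw_id(void)
{
	unsigned int id;

	pthread_mutex_lock(&id_lock);
	if (!fw_id)
		fw_id = alloc_fw_object();
	id = fw_id;
	pthread_mutex_unlock(&id_lock);
	return id;
}

Keeping the check and the allocation under the same mutex is what makes the "initialize once, from whichever thread gets there first" guarantee hold, which is exactly what the lock in struct mlx5_fs_hws_action is documented to protect.
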
+ */ + struct mutex lock; }; struct mlx5_fs_hws_matcher { @@ -84,12 +89,23 @@ void mlx5_fs_put_hws_action(struct mlx5_fs_hws_data *fs_hws_data); #ifdef CONFIG_MLX5_HW_STEERING +int +mlx5_fs_hws_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat, + u32 *reformat_id); + bool mlx5_fs_hws_is_supported(struct mlx5_core_dev *dev); const struct mlx5_flow_cmds *mlx5_fs_cmd_get_hws_cmds(void); #else +static inline int +mlx5_fs_hws_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat, + u32 *reformat_id) +{ + return -EOPNOTSUPP; +} + static inline bool mlx5_fs_hws_is_supported(struct mlx5_core_dev *dev) { return false; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/internal.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/internal.h index 30ccd635b505..21279d503117 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/internal.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/internal.h @@ -17,6 +17,7 @@ #include "context.h" #include "table.h" #include "send.h" +#include "action_ste_pool.h" #include "rule.h" #include "cmd.h" #include "action.h" diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c index b61864b32053..ce28ee1c0e41 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c @@ -3,25 +3,6 @@ #include "internal.h" -enum mlx5hws_matcher_rtc_type { - HWS_MATCHER_RTC_TYPE_MATCH, - HWS_MATCHER_RTC_TYPE_STE_ARRAY, - HWS_MATCHER_RTC_TYPE_MAX, -}; - -static const char * const mlx5hws_matcher_rtc_type_str[] = { - [HWS_MATCHER_RTC_TYPE_MATCH] = "MATCH", - [HWS_MATCHER_RTC_TYPE_STE_ARRAY] = "STE_ARRAY", - [HWS_MATCHER_RTC_TYPE_MAX] = "UNKNOWN", -}; - -static const char *hws_matcher_rtc_type_to_str(enum mlx5hws_matcher_rtc_type rtc_type) -{ - if (rtc_type > HWS_MATCHER_RTC_TYPE_MAX) - rtc_type = HWS_MATCHER_RTC_TYPE_MAX; - return mlx5hws_matcher_rtc_type_str[rtc_type]; -} - static bool hws_matcher_requires_col_tbl(u8 log_num_of_rules) { /* Collision table concatenation is done only for large rule tables */ @@ -42,19 +23,199 @@ static void hws_matcher_destroy_end_ft(struct mlx5hws_matcher *matcher) mlx5hws_table_destroy_default_ft(matcher->tbl, matcher->end_ft_id); } +int mlx5hws_matcher_update_end_ft_isolated(struct mlx5hws_table *tbl, + u32 miss_ft_id) +{ + struct mlx5hws_matcher *tmp_matcher; + + if (list_empty(&tbl->matchers_list)) + return -EINVAL; + + /* Update isolated_matcher_end_ft_id attribute for all + * the matchers in isolated table. 
+ */ + list_for_each_entry(tmp_matcher, &tbl->matchers_list, list_node) + tmp_matcher->attr.isolated_matcher_end_ft_id = miss_ft_id; + + tmp_matcher = list_last_entry(&tbl->matchers_list, + struct mlx5hws_matcher, + list_node); + + return mlx5hws_table_ft_set_next_ft(tbl->ctx, + tmp_matcher->end_ft_id, + tbl->fw_ft_type, + miss_ft_id); +} + +static int hws_matcher_connect_end_ft_isolated(struct mlx5hws_matcher *matcher) +{ + struct mlx5hws_table *tbl = matcher->tbl; + u32 end_ft_id; + int ret; + + /* Reset end_ft next RTCs */ + ret = mlx5hws_table_ft_set_next_rtc(tbl->ctx, + matcher->end_ft_id, + matcher->tbl->fw_ft_type, + 0, 0); + if (ret) { + mlx5hws_err(tbl->ctx, "Isolated matcher: failed to reset FT's next RTCs\n"); + return ret; + } + + /* Connect isolated matcher's end_ft to the complex matcher's end FT */ + end_ft_id = matcher->attr.isolated_matcher_end_ft_id; + ret = mlx5hws_table_ft_set_next_ft(tbl->ctx, + matcher->end_ft_id, + matcher->tbl->fw_ft_type, + end_ft_id); + + if (ret) { + mlx5hws_err(tbl->ctx, "Isolated matcher: failed to set FT's miss_ft_id\n"); + return ret; + } + + return 0; +} + +static int hws_matcher_create_end_ft_isolated(struct mlx5hws_matcher *matcher) +{ + struct mlx5hws_table *tbl = matcher->tbl; + int ret; + + ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev, + tbl, + &matcher->end_ft_id); + if (ret) { + mlx5hws_err(tbl->ctx, "Isolated matcher: failed to create end flow table\n"); + return ret; + } + + ret = hws_matcher_connect_end_ft_isolated(matcher); + if (ret) { + mlx5hws_err(tbl->ctx, "Isolated matcher: failed to connect end FT\n"); + goto destroy_default_ft; + } + + return 0; + +destroy_default_ft: + mlx5hws_table_destroy_default_ft(tbl, matcher->end_ft_id); + return ret; +} + static int hws_matcher_create_end_ft(struct mlx5hws_matcher *matcher) { struct mlx5hws_table *tbl = matcher->tbl; int ret; - ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev, tbl, &matcher->end_ft_id); + if (mlx5hws_matcher_is_isolated(matcher)) + ret = hws_matcher_create_end_ft_isolated(matcher); + else + ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev, tbl, + &matcher->end_ft_id); + if (ret) { mlx5hws_err(tbl->ctx, "Failed to create matcher end flow table\n"); return ret; } + return 0; } +static int hws_matcher_connect_isolated_first(struct mlx5hws_matcher *matcher) +{ + struct mlx5hws_table *tbl = matcher->tbl; + struct mlx5hws_context *ctx = tbl->ctx; + int ret; + + /* Isolated matcher's end_ft is already pointing to the end_ft + * of the complex matcher - it was set at creation of end_ft, + * so no need to connect it. + * We still need to connect the isolated table's start FT to + * this matcher's RTC. 
+ */ + ret = mlx5hws_table_ft_set_next_rtc(ctx, + tbl->ft_id, + tbl->fw_ft_type, + matcher->match_ste.rtc_0_id, + matcher->match_ste.rtc_1_id); + if (ret) { + mlx5hws_err(ctx, "Isolated matcher: failed to connect start FT to match RTC\n"); + return ret; + } + + /* Reset table's FT default miss (drop refcount) */ + ret = mlx5hws_table_ft_set_default_next_ft(tbl, tbl->ft_id); + if (ret) { + mlx5hws_err(ctx, "Isolated matcher: failed to reset table ft default miss\n"); + return ret; + } + + list_add(&matcher->list_node, &tbl->matchers_list); + + return ret; +} + +static int hws_matcher_connect_isolated_last(struct mlx5hws_matcher *matcher) +{ + struct mlx5hws_table *tbl = matcher->tbl; + struct mlx5hws_context *ctx = tbl->ctx; + struct mlx5hws_matcher *last; + int ret; + + last = list_last_entry(&tbl->matchers_list, + struct mlx5hws_matcher, + list_node); + + /* New matcher's end_ft is already pointing to the end_ft of + * the complex matcher. + * Connect previous matcher's end_ft to this new matcher RTC. + */ + ret = mlx5hws_table_ft_set_next_rtc(ctx, + last->end_ft_id, + tbl->fw_ft_type, + matcher->match_ste.rtc_0_id, + matcher->match_ste.rtc_1_id); + if (ret) { + mlx5hws_err(ctx, + "Isolated matcher: failed to connect matcher end_ft to new match RTC\n"); + return ret; + } + + /* Reset prev matcher FT default miss (drop refcount) */ + ret = mlx5hws_table_ft_set_default_next_ft(tbl, last->end_ft_id); + if (ret) { + mlx5hws_err(ctx, "Isolated matcher: failed to reset matcher ft default miss\n"); + return ret; + } + + /* Insert after the last matcher */ + list_add(&matcher->list_node, &last->list_node); + + return 0; +} + +static int hws_matcher_connect_isolated(struct mlx5hws_matcher *matcher) +{ + /* Isolated matcher is expected to be the only one in its table. + * However, it can have a collision matcher, and it can go through + * the rehash process, in which case we will temporarily have both old and + * new matchers in the isolated table. + * Check if this is the first matcher in the isolated table. + */ + if (list_empty(&matcher->tbl->matchers_list)) + return hws_matcher_connect_isolated_first(matcher); + + /* If this wasn't the first matcher, then we have 3 possible cases: + * - this is a collision matcher for the first matcher + * - this is a new rehash dest matcher + * - this is a collision matcher for the new rehash dest matcher + * The logic to add a new matcher is the same for all these cases.
+ */ + return hws_matcher_connect_isolated_last(matcher); +} + static int hws_matcher_connect(struct mlx5hws_matcher *matcher) { struct mlx5hws_table *tbl = matcher->tbl; @@ -64,6 +225,9 @@ static int hws_matcher_connect(struct mlx5hws_matcher *matcher) struct mlx5hws_matcher *tmp_matcher; int ret; + if (mlx5hws_matcher_is_isolated(matcher)) + return hws_matcher_connect_isolated(matcher); + /* Find location in matcher list */ if (list_empty(&tbl->matchers_list)) { list_add(&matcher->list_node, &tbl->matchers_list); @@ -140,6 +304,92 @@ remove_from_list: return ret; } +static int hws_matcher_disconnect_isolated(struct mlx5hws_matcher *matcher) +{ + struct mlx5hws_matcher *first, *last, *prev, *next; + struct mlx5hws_table *tbl = matcher->tbl; + struct mlx5hws_context *ctx = tbl->ctx; + u32 end_ft_id; + int ret; + + first = list_first_entry(&tbl->matchers_list, + struct mlx5hws_matcher, + list_node); + last = list_last_entry(&tbl->matchers_list, + struct mlx5hws_matcher, + list_node); + prev = list_prev_entry(matcher, list_node); + next = list_next_entry(matcher, list_node); + + list_del_init(&matcher->list_node); + + if (first == last) { + /* This was the only matcher in the list. + * Reset isolated table FT next RTCs and connect it + * to the whole complex matcher end FT instead. + */ + ret = mlx5hws_table_ft_set_next_rtc(ctx, + tbl->ft_id, + tbl->fw_ft_type, + 0, 0); + if (ret) { + mlx5hws_err(tbl->ctx, "Isolated matcher: failed to reset FT's next RTCs\n"); + return ret; + } + + end_ft_id = matcher->attr.isolated_matcher_end_ft_id; + ret = mlx5hws_table_ft_set_next_ft(tbl->ctx, + tbl->ft_id, + tbl->fw_ft_type, + end_ft_id); + if (ret) { + mlx5hws_err(tbl->ctx, "Isolated matcher: failed to set FT's miss_ft_id\n"); + return ret; + } + + return 0; + } + + /* At this point we know that there are more matchers in the list */ + + if (matcher == first) { + /* We've disconnected the first matcher. + * Now update isolated table default FT. + */ + if (!next) + return -EINVAL; + return mlx5hws_table_ft_set_next_rtc(ctx, + tbl->ft_id, + tbl->fw_ft_type, + next->match_ste.rtc_0_id, + next->match_ste.rtc_1_id); + } + + if (matcher == last) { + /* If we've disconnected the last matcher - update prev + * matcher's end_ft to point to the complex matcher end_ft. + */ + if (!prev) + return -EINVAL; + return hws_matcher_connect_end_ft_isolated(prev); + } + + /* This wasn't the first or the last matcher, which means that it has + * both prev and next matchers. Note that this only happens if we're + * disconnecting collision matcher of the old matcher during rehash. 
+ */ + if (!prev || !next || + !(matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION)) + return -EINVAL; + + /* Update prev end FT to point to next match RTC */ + return mlx5hws_table_ft_set_next_rtc(ctx, + prev->end_ft_id, + tbl->fw_ft_type, + next->match_ste.rtc_0_id, + next->match_ste.rtc_1_id); +} + static int hws_matcher_disconnect(struct mlx5hws_matcher *matcher) { struct mlx5hws_matcher *next = NULL, *prev = NULL; @@ -147,6 +397,9 @@ static int hws_matcher_disconnect(struct mlx5hws_matcher *matcher) u32 prev_ft_id = tbl->ft_id; int ret; + if (mlx5hws_matcher_is_isolated(matcher)) + return hws_matcher_disconnect_isolated(matcher); + if (!list_is_first(&matcher->list_node, &tbl->matchers_list)) { prev = list_prev_entry(matcher, list_node); prev_ft_id = prev->end_ft_id; @@ -197,149 +450,96 @@ static int hws_matcher_disconnect(struct mlx5hws_matcher *matcher) static void hws_matcher_set_rtc_attr_sz(struct mlx5hws_matcher *matcher, struct mlx5hws_cmd_rtc_create_attr *rtc_attr, - enum mlx5hws_matcher_rtc_type rtc_type, bool is_mirror) { - struct mlx5hws_pool_chunk *ste = &matcher->action_ste.ste; enum mlx5hws_matcher_flow_src flow_src = matcher->attr.optimize_flow_src; - bool is_match_rtc = rtc_type == HWS_MATCHER_RTC_TYPE_MATCH; if ((flow_src == MLX5HWS_MATCHER_FLOW_SRC_VPORT && !is_mirror) || (flow_src == MLX5HWS_MATCHER_FLOW_SRC_WIRE && is_mirror)) { /* Optimize FDB RTC */ rtc_attr->log_size = 0; rtc_attr->log_depth = 0; - } else { - /* Keep original values */ - rtc_attr->log_size = is_match_rtc ? matcher->attr.table.sz_row_log : ste->order; - rtc_attr->log_depth = is_match_rtc ? matcher->attr.table.sz_col_log : 0; } } -static int hws_matcher_create_rtc(struct mlx5hws_matcher *matcher, - enum mlx5hws_matcher_rtc_type rtc_type) +static int hws_matcher_create_rtc(struct mlx5hws_matcher *matcher) { struct mlx5hws_matcher_attr *attr = &matcher->attr; struct mlx5hws_cmd_rtc_create_attr rtc_attr = {0}; struct mlx5hws_match_template *mt = matcher->mt; struct mlx5hws_context *ctx = matcher->tbl->ctx; - struct mlx5hws_action_default_stc *default_stc; - struct mlx5hws_matcher_action_ste *action_ste; struct mlx5hws_table *tbl = matcher->tbl; - struct mlx5hws_pool *ste_pool, *stc_pool; - struct mlx5hws_pool_chunk *ste; - u32 *rtc_0_id, *rtc_1_id; u32 obj_id; int ret; - switch (rtc_type) { - case HWS_MATCHER_RTC_TYPE_MATCH: - rtc_0_id = &matcher->match_ste.rtc_0_id; - rtc_1_id = &matcher->match_ste.rtc_1_id; - ste_pool = matcher->match_ste.pool; - ste = &matcher->match_ste.ste; - ste->order = attr->table.sz_col_log + attr->table.sz_row_log; - - rtc_attr.log_size = attr->table.sz_row_log; - rtc_attr.log_depth = attr->table.sz_col_log; - rtc_attr.is_frst_jumbo = mlx5hws_matcher_mt_is_jumbo(mt); - rtc_attr.is_scnd_range = 0; - rtc_attr.miss_ft_id = matcher->end_ft_id; - - if (attr->insert_mode == MLX5HWS_MATCHER_INSERT_BY_HASH) { - /* The usual Hash Table */ - rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH; - - /* The first mt is used since all share the same definer */ - rtc_attr.match_definer_0 = mlx5hws_definer_get_id(mt->definer); - } else if (attr->insert_mode == MLX5HWS_MATCHER_INSERT_BY_INDEX) { - rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET; - rtc_attr.num_hash_definer = 1; - - if (attr->distribute_mode == MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH) { - /* Hash Split Table */ - rtc_attr.access_index_mode = MLX5_IFC_RTC_STE_ACCESS_MODE_BY_HASH; - rtc_attr.match_definer_0 = mlx5hws_definer_get_id(mt->definer); - } else if (attr->distribute_mode == 
MLX5HWS_MATCHER_DISTRIBUTE_BY_LINEAR) { - /* Linear Lookup Table */ - rtc_attr.access_index_mode = MLX5_IFC_RTC_STE_ACCESS_MODE_LINEAR; - rtc_attr.match_definer_0 = ctx->caps->linear_match_definer; - } - } - - /* Match pool requires implicit allocation */ - ret = mlx5hws_pool_chunk_alloc(ste_pool, ste); - if (ret) { - mlx5hws_err(ctx, "Failed to allocate STE for %s RTC", - hws_matcher_rtc_type_to_str(rtc_type)); - return ret; + rtc_attr.log_size = attr->table.sz_row_log; + rtc_attr.log_depth = attr->table.sz_col_log; + rtc_attr.is_frst_jumbo = mlx5hws_matcher_mt_is_jumbo(mt); + rtc_attr.is_scnd_range = 0; + rtc_attr.miss_ft_id = matcher->end_ft_id; + + if (attr->insert_mode == MLX5HWS_MATCHER_INSERT_BY_HASH) { + /* The usual Hash Table */ + rtc_attr.update_index_mode = + MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH; + + /* The first mt is used since all share the same definer */ + rtc_attr.match_definer_0 = mlx5hws_definer_get_id(mt->definer); + } else if (attr->insert_mode == MLX5HWS_MATCHER_INSERT_BY_INDEX) { + rtc_attr.update_index_mode = + MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET; + rtc_attr.num_hash_definer = 1; + + if (attr->distribute_mode == + MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH) { + /* Hash Split Table */ + rtc_attr.access_index_mode = + MLX5_IFC_RTC_STE_ACCESS_MODE_BY_HASH; + rtc_attr.match_definer_0 = + mlx5hws_definer_get_id(mt->definer); + } else if (attr->distribute_mode == + MLX5HWS_MATCHER_DISTRIBUTE_BY_LINEAR) { + /* Linear Lookup Table */ + rtc_attr.access_index_mode = + MLX5_IFC_RTC_STE_ACCESS_MODE_LINEAR; + rtc_attr.match_definer_0 = + ctx->caps->linear_match_definer; } - break; - - case HWS_MATCHER_RTC_TYPE_STE_ARRAY: - action_ste = &matcher->action_ste; - - rtc_0_id = &action_ste->rtc_0_id; - rtc_1_id = &action_ste->rtc_1_id; - ste_pool = action_ste->pool; - ste = &action_ste->ste; - /* Action RTC size calculation: - * log((max number of rules in matcher) * - * (max number of action STEs per rule) * - * (2 to support writing new STEs for update rule)) - */ - ste->order = ilog2(roundup_pow_of_two(action_ste->max_stes)) + - attr->table.sz_row_log + - MLX5HWS_MATCHER_ACTION_RTC_UPDATE_MULT; - rtc_attr.log_size = ste->order; - rtc_attr.log_depth = 0; - rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET; - /* The action STEs use the default always hit definer */ - rtc_attr.match_definer_0 = ctx->caps->trivial_match_definer; - rtc_attr.is_frst_jumbo = false; - rtc_attr.miss_ft_id = 0; - break; - - default: - mlx5hws_err(ctx, "HWS Invalid RTC type\n"); - return -EINVAL; } - obj_id = mlx5hws_pool_chunk_get_base_id(ste_pool, ste); + obj_id = mlx5hws_pool_get_base_id(matcher->match_ste.pool); rtc_attr.pd = ctx->pd_num; rtc_attr.ste_base = obj_id; - rtc_attr.ste_offset = ste->offset; rtc_attr.reparse_mode = mlx5hws_context_get_reparse_mode(ctx); rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(tbl->type, false); - hws_matcher_set_rtc_attr_sz(matcher, &rtc_attr, rtc_type, false); + hws_matcher_set_rtc_attr_sz(matcher, &rtc_attr, false); /* STC is a single resource (obj_id), use any STC for the ID */ - stc_pool = ctx->stc_pool; - default_stc = ctx->common_res.default_stc; - obj_id = mlx5hws_pool_chunk_get_base_id(stc_pool, &default_stc->default_hit); + obj_id = mlx5hws_pool_get_base_id(ctx->stc_pool); rtc_attr.stc_base = obj_id; - ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_0_id); + ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, + &matcher->match_ste.rtc_0_id); if (ret) { - mlx5hws_err(ctx, "Failed to create matcher RTC of type %s", - 
hws_matcher_rtc_type_to_str(rtc_type)); - goto free_ste; + mlx5hws_err(ctx, "Failed to create matcher RTC\n"); + return ret; } if (tbl->type == MLX5HWS_TABLE_TYPE_FDB) { - obj_id = mlx5hws_pool_chunk_get_base_mirror_id(ste_pool, ste); + obj_id = mlx5hws_pool_get_base_mirror_id( + matcher->match_ste.pool); rtc_attr.ste_base = obj_id; rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(tbl->type, true); - obj_id = mlx5hws_pool_chunk_get_base_mirror_id(stc_pool, &default_stc->default_hit); + obj_id = mlx5hws_pool_get_base_mirror_id(ctx->stc_pool); rtc_attr.stc_base = obj_id; - hws_matcher_set_rtc_attr_sz(matcher, &rtc_attr, rtc_type, true); + hws_matcher_set_rtc_attr_sz(matcher, &rtc_attr, true); - ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_1_id); + ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, + &matcher->match_ste.rtc_1_id); if (ret) { - mlx5hws_err(ctx, "Failed to create peer matcher RTC of type %s", - hws_matcher_rtc_type_to_str(rtc_type)); + mlx5hws_err(ctx, "Failed to create mirror matcher RTC\n"); goto destroy_rtc_0; } } @@ -347,46 +547,18 @@ static int hws_matcher_create_rtc(struct mlx5hws_matcher *matcher, return 0; destroy_rtc_0: - mlx5hws_cmd_rtc_destroy(ctx->mdev, *rtc_0_id); -free_ste: - if (rtc_type == HWS_MATCHER_RTC_TYPE_MATCH) - mlx5hws_pool_chunk_free(ste_pool, ste); + mlx5hws_cmd_rtc_destroy(ctx->mdev, matcher->match_ste.rtc_0_id); return ret; } -static void hws_matcher_destroy_rtc(struct mlx5hws_matcher *matcher, - enum mlx5hws_matcher_rtc_type rtc_type) +static void hws_matcher_destroy_rtc(struct mlx5hws_matcher *matcher) { - struct mlx5hws_matcher_action_ste *action_ste; - struct mlx5hws_table *tbl = matcher->tbl; - struct mlx5hws_pool_chunk *ste; - struct mlx5hws_pool *ste_pool; - u32 rtc_0_id, rtc_1_id; - - switch (rtc_type) { - case HWS_MATCHER_RTC_TYPE_MATCH: - rtc_0_id = matcher->match_ste.rtc_0_id; - rtc_1_id = matcher->match_ste.rtc_1_id; - ste_pool = matcher->match_ste.pool; - ste = &matcher->match_ste.ste; - break; - case HWS_MATCHER_RTC_TYPE_STE_ARRAY: - action_ste = &matcher->action_ste; - rtc_0_id = action_ste->rtc_0_id; - rtc_1_id = action_ste->rtc_1_id; - ste_pool = action_ste->pool; - ste = &action_ste->ste; - break; - default: - return; - } + struct mlx5_core_dev *mdev = matcher->tbl->ctx->mdev; - if (tbl->type == MLX5HWS_TABLE_TYPE_FDB) - mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev, rtc_1_id); + if (matcher->tbl->type == MLX5HWS_TABLE_TYPE_FDB) + mlx5hws_cmd_rtc_destroy(mdev, matcher->match_ste.rtc_1_id); - mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev, rtc_0_id); - if (rtc_type == HWS_MATCHER_RTC_TYPE_MATCH) - mlx5hws_pool_chunk_free(ste_pool, ste); + mlx5hws_cmd_rtc_destroy(mdev, matcher->match_ste.rtc_0_id); } static int @@ -454,85 +626,17 @@ static int hws_matcher_check_and_process_at(struct mlx5hws_matcher *matcher, return 0; } -static int hws_matcher_resize_init(struct mlx5hws_matcher *src_matcher) -{ - struct mlx5hws_matcher_resize_data *resize_data; - - resize_data = kzalloc(sizeof(*resize_data), GFP_KERNEL); - if (!resize_data) - return -ENOMEM; - - resize_data->max_stes = src_matcher->action_ste.max_stes; - - resize_data->stc = src_matcher->action_ste.stc; - resize_data->rtc_0_id = src_matcher->action_ste.rtc_0_id; - resize_data->rtc_1_id = src_matcher->action_ste.rtc_1_id; - resize_data->pool = src_matcher->action_ste.max_stes ? 
- src_matcher->action_ste.pool : NULL; - - /* Place the new resized matcher on the dst matcher's list */ - list_add(&resize_data->list_node, &src_matcher->resize_dst->resize_data); - - /* Move all the previous resized matchers to the dst matcher's list */ - while (!list_empty(&src_matcher->resize_data)) { - resize_data = list_first_entry(&src_matcher->resize_data, - struct mlx5hws_matcher_resize_data, - list_node); - list_del_init(&resize_data->list_node); - list_add(&resize_data->list_node, &src_matcher->resize_dst->resize_data); - } - - return 0; -} - -static void hws_matcher_resize_uninit(struct mlx5hws_matcher *matcher) -{ - struct mlx5hws_matcher_resize_data *resize_data; - - if (!mlx5hws_matcher_is_resizable(matcher)) - return; - - while (!list_empty(&matcher->resize_data)) { - resize_data = list_first_entry(&matcher->resize_data, - struct mlx5hws_matcher_resize_data, - list_node); - list_del_init(&resize_data->list_node); - - if (resize_data->max_stes) { - mlx5hws_action_free_single_stc(matcher->tbl->ctx, - matcher->tbl->type, - &resize_data->stc); - - if (matcher->tbl->type == MLX5HWS_TABLE_TYPE_FDB) - mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev, - resize_data->rtc_1_id); - - mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev, - resize_data->rtc_0_id); - - if (resize_data->pool) - mlx5hws_pool_destroy(resize_data->pool); - } - - kfree(resize_data); - } -} - static int hws_matcher_bind_at(struct mlx5hws_matcher *matcher) { bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(matcher->mt); - struct mlx5hws_cmd_stc_modify_attr stc_attr = {0}; - struct mlx5hws_matcher_action_ste *action_ste; - struct mlx5hws_table *tbl = matcher->tbl; - struct mlx5hws_pool_attr pool_attr = {0}; - struct mlx5hws_context *ctx = tbl->ctx; - u32 required_stes; - u8 max_stes = 0; + struct mlx5hws_context *ctx = matcher->tbl->ctx; + u8 required_stes, max_stes; int i, ret; if (matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION) return 0; + max_stes = 0; for (i = 0; i < matcher->num_of_at; i++) { struct mlx5hws_action_template *at = &matcher->at[i]; @@ -548,75 +652,33 @@ static int hws_matcher_bind_at(struct mlx5hws_matcher *matcher) /* Future: Optimize reparse */ } - /* There are no additional STEs required for matcher */ - if (!max_stes) - return 0; - - matcher->action_ste.max_stes = max_stes; - - action_ste = &matcher->action_ste; - - /* Allocate action STE mempool */ - pool_attr.table_type = tbl->type; - pool_attr.pool_type = MLX5HWS_POOL_TYPE_STE; - pool_attr.flags = MLX5HWS_POOL_FLAGS_FOR_STE_ACTION_POOL; - /* Pool size is similar to action RTC size */ - pool_attr.alloc_log_sz = ilog2(roundup_pow_of_two(action_ste->max_stes)) + - matcher->attr.table.sz_row_log + - MLX5HWS_MATCHER_ACTION_RTC_UPDATE_MULT; - hws_matcher_set_pool_attr(&pool_attr, matcher); - action_ste->pool = mlx5hws_pool_create(ctx, &pool_attr); - if (!action_ste->pool) { - mlx5hws_err(ctx, "Failed to create action ste pool\n"); - return -EINVAL; - } - - /* Allocate action RTC */ - ret = hws_matcher_create_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY); - if (ret) { - mlx5hws_err(ctx, "Failed to create action RTC\n"); - goto free_ste_pool; - } - - /* Allocate STC for jumps to STE */ - stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT; - stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE; - stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE; - stc_attr.ste_table.ste = action_ste->ste; - stc_attr.ste_table.ste_pool = action_ste->pool; - stc_attr.ste_table.match_definer_id = ctx->caps->trivial_match_definer; - - ret = 
mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl->type, - &action_ste->stc); - if (ret) { - mlx5hws_err(ctx, "Failed to create action jump to table STC\n"); - goto free_rtc; - } + matcher->num_of_action_stes = max_stes; return 0; - -free_rtc: - hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY); -free_ste_pool: - mlx5hws_pool_destroy(action_ste->pool); - return ret; } -static void hws_matcher_unbind_at(struct mlx5hws_matcher *matcher) +static void hws_matcher_set_ip_version_match(struct mlx5hws_matcher *matcher) { - struct mlx5hws_matcher_action_ste *action_ste; - struct mlx5hws_table *tbl = matcher->tbl; - - action_ste = &matcher->action_ste; - - if (!action_ste->max_stes || - matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION || - mlx5hws_matcher_is_in_resize(matcher)) - return; + int i; - mlx5hws_action_free_single_stc(tbl->ctx, tbl->type, &action_ste->stc); - hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY); - mlx5hws_pool_destroy(action_ste->pool); + for (i = 0; i < matcher->mt->fc_sz; i++) { + switch (matcher->mt->fc[i].fname) { + case MLX5HWS_DEFINER_FNAME_ETH_TYPE_O: + matcher->matches_outer_ethertype = 1; + break; + case MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_O: + matcher->matches_outer_ip_version = 1; + break; + case MLX5HWS_DEFINER_FNAME_ETH_TYPE_I: + matcher->matches_inner_ethertype = 1; + break; + case MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_I: + matcher->matches_inner_ip_version = 1; + break; + default: + break; + } + } } static int hws_matcher_bind_mt(struct mlx5hws_matcher *matcher) @@ -635,10 +697,11 @@ static int hws_matcher_bind_mt(struct mlx5hws_matcher *matcher) } } + hws_matcher_set_ip_version_match(matcher); + /* Create an STE pool per matcher*/ pool_attr.table_type = matcher->tbl->type; pool_attr.pool_type = MLX5HWS_POOL_TYPE_STE; - pool_attr.flags = MLX5HWS_POOL_FLAGS_FOR_MATCHER_STE_POOL; pool_attr.alloc_log_sz = matcher->attr.table.sz_col_log + matcher->attr.table.sz_row_log; hws_matcher_set_pool_attr(&pool_attr, matcher); @@ -740,6 +803,8 @@ hws_matcher_process_attr(struct mlx5hws_cmd_query_caps *caps, attr->table.sz_col_log = hws_matcher_rules_to_tbl_depth(attr->rule.num_log); matcher->flags |= attr->resizable ? MLX5HWS_MATCHER_FLAGS_RESIZABLE : 0; + matcher->flags |= attr->isolated_matcher_end_ft_id ? 
+ MLX5HWS_MATCHER_FLAGS_ISOLATED : 0; return hws_matcher_check_attr_sz(caps, matcher); } @@ -761,10 +826,10 @@ static int hws_matcher_create_and_connect(struct mlx5hws_matcher *matcher) /* Create matcher end flow table anchor */ ret = hws_matcher_create_end_ft(matcher); if (ret) - goto unbind_at; + goto unbind_mt; /* Allocate the RTC for the new matcher */ - ret = hws_matcher_create_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH); + ret = hws_matcher_create_rtc(matcher); if (ret) goto destroy_end_ft; @@ -776,11 +841,9 @@ static int hws_matcher_create_and_connect(struct mlx5hws_matcher *matcher) return 0; destroy_rtc: - hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH); + hws_matcher_destroy_rtc(matcher); destroy_end_ft: hws_matcher_destroy_end_ft(matcher); -unbind_at: - hws_matcher_unbind_at(matcher); unbind_mt: hws_matcher_unbind_mt(matcher); return ret; @@ -788,11 +851,9 @@ unbind_mt: static void hws_matcher_destroy_and_disconnect(struct mlx5hws_matcher *matcher) { - hws_matcher_resize_uninit(matcher); hws_matcher_disconnect(matcher); - hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH); + hws_matcher_destroy_rtc(matcher); hws_matcher_destroy_end_ft(matcher); - hws_matcher_unbind_at(matcher); hws_matcher_unbind_mt(matcher); } @@ -814,8 +875,6 @@ hws_matcher_create_col_matcher(struct mlx5hws_matcher *matcher) if (!col_matcher) return -ENOMEM; - INIT_LIST_HEAD(&col_matcher->resize_data); - col_matcher->tbl = matcher->tbl; col_matcher->mt = matcher->mt; col_matcher->at = matcher->at; @@ -832,6 +891,8 @@ hws_matcher_create_col_matcher(struct mlx5hws_matcher *matcher) col_matcher->attr.table.sz_row_log -= MLX5HWS_MATCHER_ASSURED_ROW_RATIO; col_matcher->attr.max_num_of_at_attach = matcher->attr.max_num_of_at_attach; + col_matcher->attr.isolated_matcher_end_ft_id = + matcher->attr.isolated_matcher_end_ft_id; ret = hws_matcher_process_attr(ctx->caps, col_matcher); if (ret) @@ -869,8 +930,6 @@ static int hws_matcher_init(struct mlx5hws_matcher *matcher) struct mlx5hws_context *ctx = matcher->tbl->ctx; int ret; - INIT_LIST_HEAD(&matcher->resize_data); - mutex_lock(&ctx->ctrl_lock); /* Allocate matcher resource and connect to the packet pipe */ @@ -905,18 +964,44 @@ static int hws_matcher_uninit(struct mlx5hws_matcher *matcher) return 0; } +static int hws_matcher_grow_at_array(struct mlx5hws_matcher *matcher) +{ + void *p; + + if (matcher->size_of_at_array >= MLX5HWS_MATCHER_MAX_AT) + return -ENOMEM; + + matcher->size_of_at_array *= 2; + p = krealloc(matcher->at, + matcher->size_of_at_array * sizeof(*matcher->at), + __GFP_ZERO | GFP_KERNEL); + if (!p) { + matcher->size_of_at_array /= 2; + return -ENOMEM; + } + + matcher->at = p; + + return 0; +} + int mlx5hws_matcher_attach_at(struct mlx5hws_matcher *matcher, struct mlx5hws_action_template *at) { bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(matcher->mt); - struct mlx5hws_context *ctx = matcher->tbl->ctx; u32 required_stes; int ret; - if (!matcher->attr.max_num_of_at_attach) { - mlx5hws_dbg(ctx, "Num of current at (%d) exceed allowed value\n", - matcher->num_of_at); - return -EOPNOTSUPP; + if (unlikely(matcher->num_of_at >= matcher->size_of_at_array)) { + ret = hws_matcher_grow_at_array(matcher); + if (ret) + return ret; + + if (matcher->col_matcher) { + ret = hws_matcher_grow_at_array(matcher->col_matcher); + if (ret) + return ret; + } } ret = hws_matcher_check_and_process_at(matcher, at); @@ -924,15 +1009,11 @@ int mlx5hws_matcher_attach_at(struct mlx5hws_matcher *matcher, return ret; required_stes = at->num_of_action_stes - 
(!is_jumbo || at->only_term); - if (matcher->action_ste.max_stes < required_stes) { - mlx5hws_dbg(ctx, "Required STEs [%d] exceeds initial action template STE [%d]\n", - required_stes, matcher->action_ste.max_stes); - return -ENOMEM; - } + if (matcher->num_of_action_stes < required_stes) + matcher->num_of_action_stes = required_stes; matcher->at[matcher->num_of_at] = *at; matcher->num_of_at += 1; - matcher->attr.max_num_of_at_attach -= 1; if (matcher->col_matcher) matcher->col_matcher->num_of_at = matcher->num_of_at; @@ -960,8 +1041,9 @@ hws_matcher_set_templates(struct mlx5hws_matcher *matcher, if (!matcher->mt) return -ENOMEM; - matcher->at = kvcalloc(num_of_at + matcher->attr.max_num_of_at_attach, - sizeof(*matcher->at), + matcher->size_of_at_array = + num_of_at + matcher->attr.max_num_of_at_attach; + matcher->at = kvcalloc(matcher->size_of_at_array, sizeof(*matcher->at), GFP_KERNEL); if (!matcher->at) { mlx5hws_err(ctx, "Failed to allocate action template array\n"); @@ -1110,7 +1192,7 @@ static int hws_matcher_resize_precheck(struct mlx5hws_matcher *src_matcher, return -EINVAL; } - if (src_matcher->action_ste.max_stes > dst_matcher->action_ste.max_stes) { + if (src_matcher->num_of_action_stes > dst_matcher->num_of_action_stes) { mlx5hws_err(ctx, "Src/dst matcher max STEs mismatch\n"); return -EINVAL; } @@ -1139,10 +1221,6 @@ int mlx5hws_matcher_resize_set_target(struct mlx5hws_matcher *src_matcher, src_matcher->resize_dst = dst_matcher; - ret = hws_matcher_resize_init(src_matcher); - if (ret) - src_matcher->resize_dst = NULL; - out: mutex_unlock(&src_matcher->tbl->ctx->ctrl_lock); return ret; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h index 020de70270c5..32e83cddcd60 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h @@ -23,6 +23,9 @@ */ #define MLX5HWS_MATCHER_ACTION_RTC_UPDATE_MULT 1 +/* Maximum number of action templates that can be attached to a matcher. 
*/ +#define MLX5HWS_MATCHER_MAX_AT 128 + enum mlx5hws_matcher_offset { MLX5HWS_MATCHER_OFFSET_TAG_DW1 = 12, MLX5HWS_MATCHER_OFFSET_TAG_DW0 = 13, @@ -31,6 +34,7 @@ enum mlx5hws_matcher_offset { enum mlx5hws_matcher_flags { MLX5HWS_MATCHER_FLAGS_COLLISION = 1 << 2, MLX5HWS_MATCHER_FLAGS_RESIZABLE = 1 << 3, + MLX5HWS_MATCHER_FLAGS_ISOLATED = 1 << 4, }; struct mlx5hws_match_template { @@ -42,28 +46,15 @@ struct mlx5hws_match_template { }; struct mlx5hws_matcher_match_ste { - struct mlx5hws_pool_chunk ste; u32 rtc_0_id; u32 rtc_1_id; struct mlx5hws_pool *pool; }; -struct mlx5hws_matcher_action_ste { - struct mlx5hws_pool_chunk ste; - struct mlx5hws_pool_chunk stc; - u32 rtc_0_id; - u32 rtc_1_id; - struct mlx5hws_pool *pool; - u8 max_stes; -}; - -struct mlx5hws_matcher_resize_data { - struct mlx5hws_pool_chunk stc; - u32 rtc_0_id; - u32 rtc_1_id; - struct mlx5hws_pool *pool; - u8 max_stes; - struct list_head list_node; +enum { + MLX5HWS_MATCHER_IPV_UNSET = 0, + MLX5HWS_MATCHER_IPV_4 = 1, + MLX5HWS_MATCHER_IPV_6 = 2, }; struct mlx5hws_matcher { @@ -72,16 +63,22 @@ struct mlx5hws_matcher { struct mlx5hws_match_template *mt; struct mlx5hws_action_template *at; u8 num_of_at; + u8 size_of_at_array; u8 num_of_mt; + u8 num_of_action_stes; /* enum mlx5hws_matcher_flags */ u8 flags; + u8 matches_outer_ethertype:1; + u8 matches_outer_ip_version:1; + u8 matches_inner_ethertype:1; + u8 matches_inner_ip_version:1; + u8 outer_ip_version:2; + u8 inner_ip_version:2; u32 end_ft_id; struct mlx5hws_matcher *col_matcher; struct mlx5hws_matcher *resize_dst; struct mlx5hws_matcher_match_ste match_ste; - struct mlx5hws_matcher_action_ste action_ste; struct list_head list_node; - struct list_head resize_data; }; static inline bool @@ -100,9 +97,17 @@ static inline bool mlx5hws_matcher_is_in_resize(struct mlx5hws_matcher *matcher) return !!matcher->resize_dst; } +static inline bool mlx5hws_matcher_is_isolated(struct mlx5hws_matcher *matcher) +{ + return !!(matcher->flags & MLX5HWS_MATCHER_FLAGS_ISOLATED); +} + static inline bool mlx5hws_matcher_is_insert_by_idx(struct mlx5hws_matcher *matcher) { return matcher->attr.insert_mode == MLX5HWS_MATCHER_INSERT_BY_INDEX; } +int mlx5hws_matcher_update_end_ft_isolated(struct mlx5hws_table *tbl, + u32 miss_ft_id); + #endif /* HWS_MATCHER_H_ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h index 5121951f2778..9bbadc4d8a0b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h @@ -119,6 +119,8 @@ struct mlx5hws_matcher_attr { }; /* Optional AT attach configuration - Max number of additional AT */ u8 max_num_of_at_attach; + /* Optional end FT (miss FT ID) for match RTC (for isolated matcher) */ + u32 isolated_matcher_end_ft_id; }; struct mlx5hws_rule_attr { @@ -502,6 +504,15 @@ enum mlx5hws_action_type mlx5hws_action_get_type(struct mlx5hws_action *action); /** + * mlx5hws_action_get_dev - Get mlx5 core device. + * + * @action: The action to get the device from. + * + * Return: mlx5 core device. + */ +struct mlx5_core_dev *mlx5hws_action_get_dev(struct mlx5hws_action *action); + +/** * mlx5hws_action_create_dest_drop - Create a direct rule drop action. * * @ctx: The context in which the new action will be created. 
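The mlx5hws_action_get_dev() helper declared in the hunk above is what lets mlx5_fs_hws_action_get_pkt_reformat_id() reach the core device when it lazily allocates a firmware reformat ID under fs_hws_action.lock. The snippet below is a minimal user-space sketch of that mutex-protected, allocate-on-first-use pattern; the names (struct lazy_id, alloc_fw_id()) and the pthread locking are hypothetical stand-ins for illustration only, not part of the mlx5 driver or the mlx5hws API.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for allocating a firmware-side object ID. */
static int alloc_fw_id(uint32_t *id)
{
	static uint32_t next = 1;

	*id = next++;	/* pretend the device returned an object ID */
	return 0;
}

struct lazy_id {
	uint32_t id;		/* 0 means "not allocated yet" */
	pthread_mutex_t lock;	/* serializes the first-time allocation */
};

/* Return the cached ID, allocating it on first use. */
static int lazy_id_get(struct lazy_id *l, uint32_t *out)
{
	int ret = 0;

	pthread_mutex_lock(&l->lock);
	if (!l->id)	/* check and allocate under the lock */
		ret = alloc_fw_id(&l->id);
	if (!ret)
		*out = l->id;
	pthread_mutex_unlock(&l->lock);

	return ret;
}

int main(void)
{
	static struct lazy_id l = { .lock = PTHREAD_MUTEX_INITIALIZER };
	uint32_t id;

	if (!lazy_id_get(&l, &id))
		printf("fw id: %u\n", id);
	return 0;
}

In the driver itself the cached ID doubles as the "already allocated" flag, which is why mlx5_cmd_hws_packet_reformat_dealloc() above only takes the FW packet_reformat_dealloc() path when fw_reformat_id is non-zero.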
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c index f51ed24526b9..51e4c551e0ef 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c @@ -490,8 +490,8 @@ hws_action_modify_get_target_fields(u8 action_type, __be64 *pattern, switch (action_type) { case MLX5_ACTION_TYPE_SET: case MLX5_ACTION_TYPE_ADD: - *src_field = MLX5_GET(set_action_in, pattern, field); - *dst_field = INVALID_FIELD; + *src_field = INVALID_FIELD; + *dst_field = MLX5_GET(set_action_in, pattern, field); break; case MLX5_ACTION_TYPE_COPY: *src_field = MLX5_GET(copy_action_in, pattern, src_field); @@ -522,57 +522,59 @@ bool mlx5hws_pat_verify_actions(struct mlx5hws_context *ctx, __be64 pattern[], s return true; } -void mlx5hws_pat_calc_nope(__be64 *pattern, size_t num_actions, - size_t max_actions, size_t *new_size, - u32 *nope_location, __be64 *new_pat) +int mlx5hws_pat_calc_nop(__be64 *pattern, size_t num_actions, + size_t max_actions, size_t *new_size, + u32 *nop_locations, __be64 *new_pat) { - u16 prev_src_field = 0, prev_dst_field = 0; + u16 prev_src_field = INVALID_FIELD, prev_dst_field = INVALID_FIELD; u16 src_field, dst_field; u8 action_type; + bool dependent; size_t i, j; *new_size = num_actions; - *nope_location = 0; + *nop_locations = 0; if (num_actions == 1) - return; + return 0; for (i = 0, j = 0; i < num_actions; i++, j++) { - action_type = MLX5_GET(set_action_in, &pattern[i], action_type); + if (j >= max_actions) + return -EINVAL; + action_type = MLX5_GET(set_action_in, &pattern[i], action_type); hws_action_modify_get_target_fields(action_type, &pattern[i], &src_field, &dst_field); - if (i % 2) { - if (action_type == MLX5_ACTION_TYPE_COPY && - (prev_src_field == src_field || - prev_dst_field == dst_field)) { - /* need Nope */ - *new_size += 1; - *nope_location |= BIT(i); - memset(&new_pat[j], 0, MLX5HWS_MODIFY_ACTION_SIZE); - MLX5_SET(set_action_in, &new_pat[j], - action_type, - MLX5_MODIFICATION_TYPE_NOP); - j++; - } else if (prev_src_field == src_field) { - /* need Nope*/ - *new_size += 1; - *nope_location |= BIT(i); - MLX5_SET(set_action_in, &new_pat[j], - action_type, - MLX5_MODIFICATION_TYPE_NOP); - j++; - } - } - memcpy(&new_pat[j], &pattern[i], MLX5HWS_MODIFY_ACTION_SIZE); - /* check if no more space */ - if (j > max_actions) { - *new_size = num_actions; - *nope_location = 0; - return; + + /* For every action, look at it and the previous one. 
The two + * actions are dependent if: + */ + dependent = + (i > 0) && + /* At least one of the actions is a write and */ + (dst_field != INVALID_FIELD || + prev_dst_field != INVALID_FIELD) && + /* One reads from the other's source */ + (dst_field == prev_src_field || + src_field == prev_dst_field || + /* Or both write to the same destination */ + dst_field == prev_dst_field); + + if (dependent) { + *new_size += 1; + *nop_locations |= BIT(i); + memset(&new_pat[j], 0, MLX5HWS_MODIFY_ACTION_SIZE); + MLX5_SET(set_action_in, &new_pat[j], action_type, + MLX5_MODIFICATION_TYPE_NOP); + j++; + if (j >= max_actions) + return -EINVAL; } + memcpy(&new_pat[j], &pattern[i], MLX5HWS_MODIFY_ACTION_SIZE); prev_src_field = src_field; prev_dst_field = dst_field; } + + return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h index 8ddb51980044..7fbd8dc7aa18 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h @@ -96,6 +96,7 @@ int mlx5hws_arg_write_inline_arg_data(struct mlx5hws_context *ctx, u8 *arg_data, size_t data_size); -void mlx5hws_pat_calc_nope(__be64 *pattern, size_t num_actions, size_t max_actions, - size_t *new_size, u32 *nope_location, __be64 *new_pat); +int mlx5hws_pat_calc_nop(__be64 *pattern, size_t num_actions, + size_t max_actions, size_t *new_size, + u32 *nop_locations, __be64 *new_pat); #endif /* MLX5HWS_PAT_ARG_H_ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c index 50a81d360bb2..7e37d6e9eb83 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c @@ -20,15 +20,14 @@ static void hws_pool_free_one_resource(struct mlx5hws_pool_resource *resource) kfree(resource); } -static void hws_pool_resource_free(struct mlx5hws_pool *pool, - int resource_idx) +static void hws_pool_resource_free(struct mlx5hws_pool *pool) { - hws_pool_free_one_resource(pool->resource[resource_idx]); - pool->resource[resource_idx] = NULL; + hws_pool_free_one_resource(pool->resource); + pool->resource = NULL; if (pool->tbl_type == MLX5HWS_TABLE_TYPE_FDB) { - hws_pool_free_one_resource(pool->mirror_resource[resource_idx]); - pool->mirror_resource[resource_idx] = NULL; + hws_pool_free_one_resource(pool->mirror_resource); + pool->mirror_resource = NULL; } } @@ -61,10 +60,8 @@ hws_pool_create_one_resource(struct mlx5hws_pool *pool, u32 log_range, ret = -EINVAL; } - if (ret) { - mlx5hws_err(pool->ctx, "Failed to allocate resource objects\n"); + if (ret) goto free_resource; - } resource->pool = pool; resource->range = 1 << log_range; @@ -77,199 +74,114 @@ free_resource: return NULL; } -static int -hws_pool_resource_alloc(struct mlx5hws_pool *pool, u32 log_range, int idx) +static int hws_pool_resource_alloc(struct mlx5hws_pool *pool) { struct mlx5hws_pool_resource *resource; u32 fw_ft_type, opt_log_range; fw_ft_type = mlx5hws_table_get_res_fw_ft_type(pool->tbl_type, false); - opt_log_range = pool->opt_type == MLX5HWS_POOL_OPTIMIZE_ORIG ? 0 : log_range; + opt_log_range = pool->opt_type == MLX5HWS_POOL_OPTIMIZE_MIRROR ? 
+ 0 : pool->alloc_log_sz; resource = hws_pool_create_one_resource(pool, opt_log_range, fw_ft_type); if (!resource) { - mlx5hws_err(pool->ctx, "Failed allocating resource\n"); + mlx5hws_err(pool->ctx, "Failed to allocate resource\n"); return -EINVAL; } - pool->resource[idx] = resource; + pool->resource = resource; if (pool->tbl_type == MLX5HWS_TABLE_TYPE_FDB) { struct mlx5hws_pool_resource *mirror_resource; fw_ft_type = mlx5hws_table_get_res_fw_ft_type(pool->tbl_type, true); - opt_log_range = pool->opt_type == MLX5HWS_POOL_OPTIMIZE_MIRROR ? 0 : log_range; + opt_log_range = pool->opt_type == MLX5HWS_POOL_OPTIMIZE_ORIG ? + 0 : pool->alloc_log_sz; mirror_resource = hws_pool_create_one_resource(pool, opt_log_range, fw_ft_type); if (!mirror_resource) { - mlx5hws_err(pool->ctx, "Failed allocating mirrored resource\n"); + mlx5hws_err(pool->ctx, "Failed to allocate mirrored resource\n"); hws_pool_free_one_resource(resource); - pool->resource[idx] = NULL; + pool->resource = NULL; return -EINVAL; } - pool->mirror_resource[idx] = mirror_resource; + pool->mirror_resource = mirror_resource; } return 0; } -static unsigned long *hws_pool_create_and_init_bitmap(u32 log_range) -{ - unsigned long *cur_bmp; - - cur_bmp = bitmap_zalloc(1 << log_range, GFP_KERNEL); - if (!cur_bmp) - return NULL; - - bitmap_fill(cur_bmp, 1 << log_range); - - return cur_bmp; -} - -static void hws_pool_buddy_db_put_chunk(struct mlx5hws_pool *pool, - struct mlx5hws_pool_chunk *chunk) +static int hws_pool_buddy_init(struct mlx5hws_pool *pool) { struct mlx5hws_buddy_mem *buddy; - buddy = pool->db.buddy_manager->buddies[chunk->resource_idx]; + buddy = mlx5hws_buddy_create(pool->alloc_log_sz); if (!buddy) { - mlx5hws_err(pool->ctx, "No such buddy (%d)\n", chunk->resource_idx); - return; - } - - mlx5hws_buddy_free_mem(buddy, chunk->offset, chunk->order); -} - -static struct mlx5hws_buddy_mem * -hws_pool_buddy_get_next_buddy(struct mlx5hws_pool *pool, int idx, - u32 order, bool *is_new_buddy) -{ - static struct mlx5hws_buddy_mem *buddy; - u32 new_buddy_size; - - buddy = pool->db.buddy_manager->buddies[idx]; - if (buddy) - return buddy; - - new_buddy_size = max(pool->alloc_log_sz, order); - *is_new_buddy = true; - buddy = mlx5hws_buddy_create(new_buddy_size); - if (!buddy) { - mlx5hws_err(pool->ctx, "Failed to create buddy order: %d index: %d\n", - new_buddy_size, idx); - return NULL; + mlx5hws_err(pool->ctx, "Failed to create buddy order: %zu\n", + pool->alloc_log_sz); + return -ENOMEM; } - if (hws_pool_resource_alloc(pool, new_buddy_size, idx) != 0) { - mlx5hws_err(pool->ctx, "Failed to create resource type: %d: size %d index: %d\n", - pool->type, new_buddy_size, idx); + if (hws_pool_resource_alloc(pool) != 0) { + mlx5hws_err(pool->ctx, "Failed to create resource type: %d size %zu\n", + pool->type, pool->alloc_log_sz); mlx5hws_buddy_cleanup(buddy); - return NULL; + return -ENOMEM; } - pool->db.buddy_manager->buddies[idx] = buddy; + pool->db.buddy = buddy; - return buddy; + return 0; } -static int hws_pool_buddy_get_mem_chunk(struct mlx5hws_pool *pool, - int order, - u32 *buddy_idx, - int *seg) +static int hws_pool_buddy_db_get_chunk(struct mlx5hws_pool *pool, + struct mlx5hws_pool_chunk *chunk) { - struct mlx5hws_buddy_mem *buddy; - bool new_mem = false; - int ret = 0; - int i; - - *seg = -1; - - /* Find the next free place from the buddy array */ - while (*seg < 0) { - for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++) { - buddy = hws_pool_buddy_get_next_buddy(pool, i, - order, - &new_mem); - if (!buddy) { - ret = -ENOMEM; - goto out; 
- } - - *seg = mlx5hws_buddy_alloc_mem(buddy, order); - if (*seg >= 0) - goto found; - - if (pool->flags & MLX5HWS_POOL_FLAGS_ONE_RESOURCE) { - mlx5hws_err(pool->ctx, - "Fail to allocate seg for one resource pool\n"); - ret = -ENOMEM; - goto out; - } - - if (new_mem) { - /* We have new memory pool, should be place for us */ - mlx5hws_err(pool->ctx, - "No memory for order: %d with buddy no: %d\n", - order, i); - ret = -ENOMEM; - goto out; - } - } + struct mlx5hws_buddy_mem *buddy = pool->db.buddy; + + if (!buddy) { + mlx5hws_err(pool->ctx, "Bad buddy state\n"); + return -EINVAL; } -found: - *buddy_idx = i; -out: - return ret; + chunk->offset = mlx5hws_buddy_alloc_mem(buddy, chunk->order); + if (chunk->offset >= 0) + return 0; + + return -ENOMEM; } -static int hws_pool_buddy_db_get_chunk(struct mlx5hws_pool *pool, - struct mlx5hws_pool_chunk *chunk) +static void hws_pool_buddy_db_put_chunk(struct mlx5hws_pool *pool, + struct mlx5hws_pool_chunk *chunk) { - int ret = 0; + struct mlx5hws_buddy_mem *buddy; - /* Go over the buddies and find next free slot */ - ret = hws_pool_buddy_get_mem_chunk(pool, chunk->order, - &chunk->resource_idx, - &chunk->offset); - if (ret) - mlx5hws_err(pool->ctx, "Failed to get free slot for chunk with order: %d\n", - chunk->order); + buddy = pool->db.buddy; + if (!buddy) { + mlx5hws_err(pool->ctx, "Bad buddy state\n"); + return; + } - return ret; + mlx5hws_buddy_free_mem(buddy, chunk->offset, chunk->order); } static void hws_pool_buddy_db_uninit(struct mlx5hws_pool *pool) { struct mlx5hws_buddy_mem *buddy; - int i; - - for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++) { - buddy = pool->db.buddy_manager->buddies[i]; - if (buddy) { - mlx5hws_buddy_cleanup(buddy); - kfree(buddy); - pool->db.buddy_manager->buddies[i] = NULL; - } - } - kfree(pool->db.buddy_manager); + buddy = pool->db.buddy; + if (buddy) { + mlx5hws_buddy_cleanup(buddy); + kfree(buddy); + pool->db.buddy = NULL; + } } -static int hws_pool_buddy_db_init(struct mlx5hws_pool *pool, u32 log_range) +static int hws_pool_buddy_db_init(struct mlx5hws_pool *pool) { - pool->db.buddy_manager = kzalloc(sizeof(*pool->db.buddy_manager), GFP_KERNEL); - if (!pool->db.buddy_manager) - return -ENOMEM; - - if (pool->flags & MLX5HWS_POOL_FLAGS_ALLOC_MEM_ON_CREATE) { - bool new_buddy; + int ret; - if (!hws_pool_buddy_get_next_buddy(pool, 0, log_range, &new_buddy)) { - mlx5hws_err(pool->ctx, - "Failed allocating memory on create log_sz: %d\n", log_range); - kfree(pool->db.buddy_manager); - return -ENOMEM; - } - } + ret = hws_pool_buddy_init(pool); + if (ret) + return ret; pool->p_db_uninit = &hws_pool_buddy_db_uninit; pool->p_get_chunk = &hws_pool_buddy_db_get_chunk; @@ -278,261 +190,105 @@ static int hws_pool_buddy_db_init(struct mlx5hws_pool *pool, u32 log_range) return 0; } -static int hws_pool_create_resource_on_index(struct mlx5hws_pool *pool, - u32 alloc_size, int idx) -{ - int ret = hws_pool_resource_alloc(pool, alloc_size, idx); - - if (ret) { - mlx5hws_err(pool->ctx, "Failed to create resource type: %d: size %d index: %d\n", - pool->type, alloc_size, idx); - return ret; - } - - return 0; -} - -static struct mlx5hws_pool_elements * -hws_pool_element_create_new_elem(struct mlx5hws_pool *pool, u32 order, int idx) +static unsigned long *hws_pool_create_and_init_bitmap(u32 log_range) { - struct mlx5hws_pool_elements *elem; - u32 alloc_size; - - alloc_size = pool->alloc_log_sz; + unsigned long *bitmap; - elem = kzalloc(sizeof(*elem), GFP_KERNEL); - if (!elem) + bitmap = bitmap_zalloc(1 << log_range, GFP_KERNEL); + if 
(!bitmap) return NULL; - /* Sharing the same resource, also means that all the elements are with size 1 */ - if ((pool->flags & MLX5HWS_POOL_FLAGS_FIXED_SIZE_OBJECTS) && - !(pool->flags & MLX5HWS_POOL_FLAGS_RESOURCE_PER_CHUNK)) { - /* Currently all chunks in size 1 */ - elem->bitmap = hws_pool_create_and_init_bitmap(alloc_size - order); - if (!elem->bitmap) { - mlx5hws_err(pool->ctx, - "Failed to create bitmap type: %d: size %d index: %d\n", - pool->type, alloc_size, idx); - goto free_elem; - } - - elem->log_size = alloc_size - order; - } - - if (hws_pool_create_resource_on_index(pool, alloc_size, idx)) { - mlx5hws_err(pool->ctx, "Failed to create resource type: %d: size %d index: %d\n", - pool->type, alloc_size, idx); - goto free_db; - } - - pool->db.element_manager->elements[idx] = elem; - - return elem; + bitmap_fill(bitmap, 1 << log_range); -free_db: - bitmap_free(elem->bitmap); -free_elem: - kfree(elem); - return NULL; + return bitmap; } -static int hws_pool_element_find_seg(struct mlx5hws_pool_elements *elem, int *seg) +static int hws_pool_bitmap_init(struct mlx5hws_pool *pool) { - unsigned int segment, size; - - size = 1 << elem->log_size; + unsigned long *bitmap; - segment = find_first_bit(elem->bitmap, size); - if (segment >= size) { - elem->is_full = true; + bitmap = hws_pool_create_and_init_bitmap(pool->alloc_log_sz); + if (!bitmap) { + mlx5hws_err(pool->ctx, "Failed to create bitmap order: %zu\n", + pool->alloc_log_sz); return -ENOMEM; } - bitmap_clear(elem->bitmap, segment, 1); - *seg = segment; - return 0; -} - -static int -hws_pool_onesize_element_get_mem_chunk(struct mlx5hws_pool *pool, u32 order, - u32 *idx, int *seg) -{ - struct mlx5hws_pool_elements *elem; - - elem = pool->db.element_manager->elements[0]; - if (!elem) - elem = hws_pool_element_create_new_elem(pool, order, 0); - if (!elem) - goto err_no_elem; - - if (hws_pool_element_find_seg(elem, seg) != 0) { - mlx5hws_err(pool->ctx, "No more resources (last request order: %d)\n", order); + if (hws_pool_resource_alloc(pool) != 0) { + mlx5hws_err(pool->ctx, "Failed to create resource type: %d: size %zu\n", + pool->type, pool->alloc_log_sz); + bitmap_free(bitmap); return -ENOMEM; } - *idx = 0; - elem->num_of_elements++; - return 0; + pool->db.bitmap = bitmap; -err_no_elem: - mlx5hws_err(pool->ctx, "Failed to allocate element for order: %d\n", order); - return -ENOMEM; -} - -static int -hws_pool_general_element_get_mem_chunk(struct mlx5hws_pool *pool, u32 order, - u32 *idx, int *seg) -{ - int ret, i; - - for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++) { - if (!pool->resource[i]) { - ret = hws_pool_create_resource_on_index(pool, order, i); - if (ret) - goto err_no_res; - *idx = i; - *seg = 0; /* One memory slot in that element */ - return 0; - } - } - - mlx5hws_err(pool->ctx, "No more resources (last request order: %d)\n", order); - return -ENOMEM; - -err_no_res: - mlx5hws_err(pool->ctx, "Failed to allocate element for order: %d\n", order); - return -ENOMEM; + return 0; } -static int hws_pool_general_element_db_get_chunk(struct mlx5hws_pool *pool, - struct mlx5hws_pool_chunk *chunk) +static int hws_pool_bitmap_db_get_chunk(struct mlx5hws_pool *pool, + struct mlx5hws_pool_chunk *chunk) { - int ret; - - /* Go over all memory elements and find/allocate free slot */ - ret = hws_pool_general_element_get_mem_chunk(pool, chunk->order, - &chunk->resource_idx, - &chunk->offset); - if (ret) - mlx5hws_err(pool->ctx, "Failed to get free slot for chunk with order: %d\n", - chunk->order); + unsigned long *bitmap, size; - return ret; 
-} + if (chunk->order != 0) { + mlx5hws_err(pool->ctx, "Pool only supports order 0 allocs\n"); + return -EINVAL; + } -static void hws_pool_general_element_db_put_chunk(struct mlx5hws_pool *pool, - struct mlx5hws_pool_chunk *chunk) -{ - if (unlikely(!pool->resource[chunk->resource_idx])) - pr_warn("HWS: invalid resource with index %d\n", chunk->resource_idx); + bitmap = pool->db.bitmap; + if (!bitmap) { + mlx5hws_err(pool->ctx, "Bad bitmap state\n"); + return -EINVAL; + } - if (pool->flags & MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE) - hws_pool_resource_free(pool, chunk->resource_idx); -} + size = 1 << pool->alloc_log_sz; -static void hws_pool_general_element_db_uninit(struct mlx5hws_pool *pool) -{ - (void)pool; -} + chunk->offset = find_first_bit(bitmap, size); + if (chunk->offset >= size) + return -ENOMEM; -/* This memory management works as the following: - * - At start doesn't allocate no mem at all. - * - When new request for chunk arrived: - * allocate resource and give it. - * - When free that chunk: - * the resource is freed. - */ -static int hws_pool_general_element_db_init(struct mlx5hws_pool *pool) -{ - pool->p_db_uninit = &hws_pool_general_element_db_uninit; - pool->p_get_chunk = &hws_pool_general_element_db_get_chunk; - pool->p_put_chunk = &hws_pool_general_element_db_put_chunk; + bitmap_clear(bitmap, chunk->offset, 1); return 0; } -static void hws_onesize_element_db_destroy_element(struct mlx5hws_pool *pool, - struct mlx5hws_pool_elements *elem, - struct mlx5hws_pool_chunk *chunk) -{ - if (unlikely(!pool->resource[chunk->resource_idx])) - pr_warn("HWS: invalid resource with index %d\n", chunk->resource_idx); - - hws_pool_resource_free(pool, chunk->resource_idx); - kfree(elem); - pool->db.element_manager->elements[chunk->resource_idx] = NULL; -} - -static void hws_onesize_element_db_put_chunk(struct mlx5hws_pool *pool, - struct mlx5hws_pool_chunk *chunk) +static void hws_pool_bitmap_db_put_chunk(struct mlx5hws_pool *pool, + struct mlx5hws_pool_chunk *chunk) { - struct mlx5hws_pool_elements *elem; + unsigned long *bitmap; - if (unlikely(chunk->resource_idx)) - pr_warn("HWS: invalid resource with index %d\n", chunk->resource_idx); - - elem = pool->db.element_manager->elements[chunk->resource_idx]; - if (!elem) { - mlx5hws_err(pool->ctx, "No such element (%d)\n", chunk->resource_idx); + bitmap = pool->db.bitmap; + if (!bitmap) { + mlx5hws_err(pool->ctx, "Bad bitmap state\n"); return; } - bitmap_set(elem->bitmap, chunk->offset, 1); - elem->is_full = false; - elem->num_of_elements--; - - if (pool->flags & MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE && - !elem->num_of_elements) - hws_onesize_element_db_destroy_element(pool, elem, chunk); + bitmap_set(bitmap, chunk->offset, 1); } -static int hws_onesize_element_db_get_chunk(struct mlx5hws_pool *pool, - struct mlx5hws_pool_chunk *chunk) +static void hws_pool_bitmap_db_uninit(struct mlx5hws_pool *pool) { - int ret = 0; + unsigned long *bitmap; - /* Go over all memory elements and find/allocate free slot */ - ret = hws_pool_onesize_element_get_mem_chunk(pool, chunk->order, - &chunk->resource_idx, - &chunk->offset); - if (ret) - mlx5hws_err(pool->ctx, "Failed to get free slot for chunk with order: %d\n", - chunk->order); - - return ret; -} - -static void hws_onesize_element_db_uninit(struct mlx5hws_pool *pool) -{ - struct mlx5hws_pool_elements *elem; - int i; - - for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++) { - elem = pool->db.element_manager->elements[i]; - if (elem) { - bitmap_free(elem->bitmap); - kfree(elem); - 
pool->db.element_manager->elements[i] = NULL; - } + bitmap = pool->db.bitmap; + if (bitmap) { + bitmap_free(bitmap); + pool->db.bitmap = NULL; } - kfree(pool->db.element_manager); } -/* This memory management works as the following: - * - At start doesn't allocate no mem at all. - * - When new request for chunk arrived: - * aloocate the first and only slot of memory/resource - * when it ended return error. - */ -static int hws_pool_onesize_element_db_init(struct mlx5hws_pool *pool) +static int hws_pool_bitmap_db_init(struct mlx5hws_pool *pool) { - pool->db.element_manager = kzalloc(sizeof(*pool->db.element_manager), GFP_KERNEL); - if (!pool->db.element_manager) - return -ENOMEM; + int ret; - pool->p_db_uninit = &hws_onesize_element_db_uninit; - pool->p_get_chunk = &hws_onesize_element_db_get_chunk; - pool->p_put_chunk = &hws_onesize_element_db_put_chunk; + ret = hws_pool_bitmap_init(pool); + if (ret) + return ret; + + pool->p_db_uninit = &hws_pool_bitmap_db_uninit; + pool->p_get_chunk = &hws_pool_bitmap_db_get_chunk; + pool->p_put_chunk = &hws_pool_bitmap_db_put_chunk; return 0; } @@ -542,15 +298,14 @@ static int hws_pool_db_init(struct mlx5hws_pool *pool, { int ret; - if (db_type == MLX5HWS_POOL_DB_TYPE_GENERAL_SIZE) - ret = hws_pool_general_element_db_init(pool); - else if (db_type == MLX5HWS_POOL_DB_TYPE_ONE_SIZE_RESOURCE) - ret = hws_pool_onesize_element_db_init(pool); + if (db_type == MLX5HWS_POOL_DB_TYPE_BITMAP) + ret = hws_pool_bitmap_db_init(pool); else - ret = hws_pool_buddy_db_init(pool, pool->alloc_log_sz); + ret = hws_pool_buddy_db_init(pool); if (ret) { - mlx5hws_err(pool->ctx, "Failed to init general db : %d (ret: %d)\n", db_type, ret); + mlx5hws_err(pool->ctx, "Failed to init pool type: %d (ret: %d)\n", + db_type, ret); return ret; } @@ -569,6 +324,8 @@ int mlx5hws_pool_chunk_alloc(struct mlx5hws_pool *pool, mutex_lock(&pool->lock); ret = pool->p_get_chunk(pool, chunk); + if (ret == 0) + pool->available_elems -= 1 << chunk->order; mutex_unlock(&pool->lock); return ret; @@ -579,6 +336,7 @@ void mlx5hws_pool_chunk_free(struct mlx5hws_pool *pool, { mutex_lock(&pool->lock); pool->p_put_chunk(pool, chunk); + pool->available_elems += 1 << chunk->order; mutex_unlock(&pool->lock); } @@ -599,17 +357,13 @@ mlx5hws_pool_create(struct mlx5hws_context *ctx, struct mlx5hws_pool_attr *pool_ pool->tbl_type = pool_attr->table_type; pool->opt_type = pool_attr->opt_type; - /* Support general db */ - if (pool->flags == (MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE | - MLX5HWS_POOL_FLAGS_RESOURCE_PER_CHUNK)) - res_db_type = MLX5HWS_POOL_DB_TYPE_GENERAL_SIZE; - else if (pool->flags == (MLX5HWS_POOL_FLAGS_ONE_RESOURCE | - MLX5HWS_POOL_FLAGS_FIXED_SIZE_OBJECTS)) - res_db_type = MLX5HWS_POOL_DB_TYPE_ONE_SIZE_RESOURCE; - else + if (pool->flags & MLX5HWS_POOL_FLAG_BUDDY) res_db_type = MLX5HWS_POOL_DB_TYPE_BUDDY; + else + res_db_type = MLX5HWS_POOL_DB_TYPE_BITMAP; pool->alloc_log_sz = pool_attr->alloc_log_sz; + pool->available_elems = 1 << pool_attr->alloc_log_sz; if (hws_pool_db_init(pool, res_db_type)) goto free_pool; @@ -623,18 +377,17 @@ free_pool: return NULL; } -int mlx5hws_pool_destroy(struct mlx5hws_pool *pool) +void mlx5hws_pool_destroy(struct mlx5hws_pool *pool) { - int i; - mutex_destroy(&pool->lock); - for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++) - if (pool->resource[i]) - hws_pool_resource_free(pool, i); + if (pool->available_elems != 1 << pool->alloc_log_sz) + mlx5hws_err(pool->ctx, "Attempting to destroy non-empty pool\n"); + + if (pool->resource) + hws_pool_resource_free(pool); 
hws_pool_db_unint(pool); kfree(pool); - return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.h index 621298b352b2..33e33d5f1fb3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.h @@ -6,16 +6,12 @@ #define MLX5HWS_POOL_STC_LOG_SZ 15 -#define MLX5HWS_POOL_RESOURCE_ARR_SZ 100 - enum mlx5hws_pool_type { MLX5HWS_POOL_TYPE_STE, MLX5HWS_POOL_TYPE_STC, }; struct mlx5hws_pool_chunk { - u32 resource_idx; - /* Internal offset, relative to base index */ int offset; int order; }; @@ -27,35 +23,17 @@ struct mlx5hws_pool_resource { }; enum mlx5hws_pool_flags { - /* Only a one resource in that pool */ - MLX5HWS_POOL_FLAGS_ONE_RESOURCE = 1 << 0, - MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE = 1 << 1, - /* No sharing resources between chunks */ - MLX5HWS_POOL_FLAGS_RESOURCE_PER_CHUNK = 1 << 2, - /* All objects are in the same size */ - MLX5HWS_POOL_FLAGS_FIXED_SIZE_OBJECTS = 1 << 3, - /* Managed by buddy allocator */ - MLX5HWS_POOL_FLAGS_BUDDY_MANAGED = 1 << 4, - /* Allocate pool_type memory on pool creation */ - MLX5HWS_POOL_FLAGS_ALLOC_MEM_ON_CREATE = 1 << 5, - - /* These values should be used by the caller */ - MLX5HWS_POOL_FLAGS_FOR_STC_POOL = - MLX5HWS_POOL_FLAGS_ONE_RESOURCE | - MLX5HWS_POOL_FLAGS_FIXED_SIZE_OBJECTS, - MLX5HWS_POOL_FLAGS_FOR_MATCHER_STE_POOL = - MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE | - MLX5HWS_POOL_FLAGS_RESOURCE_PER_CHUNK, - MLX5HWS_POOL_FLAGS_FOR_STE_ACTION_POOL = - MLX5HWS_POOL_FLAGS_ONE_RESOURCE | - MLX5HWS_POOL_FLAGS_BUDDY_MANAGED | - MLX5HWS_POOL_FLAGS_ALLOC_MEM_ON_CREATE, + /* Managed by a buddy allocator. If this is not set only allocations of + * order 0 are supported. + */ + MLX5HWS_POOL_FLAG_BUDDY = BIT(0), }; enum mlx5hws_pool_optimize { MLX5HWS_POOL_OPTIMIZE_NONE = 0x0, MLX5HWS_POOL_OPTIMIZE_ORIG = 0x1, MLX5HWS_POOL_OPTIMIZE_MIRROR = 0x2, + MLX5HWS_POOL_OPTIMIZE_MAX = 0x3, }; struct mlx5hws_pool_attr { @@ -68,34 +46,17 @@ struct mlx5hws_pool_attr { }; enum mlx5hws_db_type { - /* Uses for allocating chunk of big memory, each element has its own resource in the FW*/ - MLX5HWS_POOL_DB_TYPE_GENERAL_SIZE, - /* One resource only, all the elements are with same one size */ - MLX5HWS_POOL_DB_TYPE_ONE_SIZE_RESOURCE, - /* Many resources, the memory allocated with buddy mechanism */ + /* Uses a bitmap, supports only allocations of order 0. */ + MLX5HWS_POOL_DB_TYPE_BITMAP, + /* Entries are managed using a buddy mechanism. 
*/ MLX5HWS_POOL_DB_TYPE_BUDDY, }; -struct mlx5hws_buddy_manager { - struct mlx5hws_buddy_mem *buddies[MLX5HWS_POOL_RESOURCE_ARR_SZ]; -}; - -struct mlx5hws_pool_elements { - u32 num_of_elements; - unsigned long *bitmap; - u32 log_size; - bool is_full; -}; - -struct mlx5hws_element_manager { - struct mlx5hws_pool_elements *elements[MLX5HWS_POOL_RESOURCE_ARR_SZ]; -}; - struct mlx5hws_pool_db { enum mlx5hws_db_type type; union { - struct mlx5hws_element_manager *element_manager; - struct mlx5hws_buddy_manager *buddy_manager; + unsigned long *bitmap; + struct mlx5hws_buddy_mem *buddy; }; }; @@ -111,11 +72,11 @@ struct mlx5hws_pool { enum mlx5hws_pool_flags flags; struct mutex lock; /* protect the pool */ size_t alloc_log_sz; + size_t available_elems; enum mlx5hws_table_type tbl_type; enum mlx5hws_pool_optimize opt_type; - struct mlx5hws_pool_resource *resource[MLX5HWS_POOL_RESOURCE_ARR_SZ]; - struct mlx5hws_pool_resource *mirror_resource[MLX5HWS_POOL_RESOURCE_ARR_SZ]; - /* DB */ + struct mlx5hws_pool_resource *resource; + struct mlx5hws_pool_resource *mirror_resource; struct mlx5hws_pool_db db; /* Functions */ mlx5hws_pool_unint_db p_db_uninit; @@ -127,7 +88,7 @@ struct mlx5hws_pool * mlx5hws_pool_create(struct mlx5hws_context *ctx, struct mlx5hws_pool_attr *pool_attr); -int mlx5hws_pool_destroy(struct mlx5hws_pool *pool); +void mlx5hws_pool_destroy(struct mlx5hws_pool *pool); int mlx5hws_pool_chunk_alloc(struct mlx5hws_pool *pool, struct mlx5hws_pool_chunk *chunk); @@ -135,17 +96,37 @@ int mlx5hws_pool_chunk_alloc(struct mlx5hws_pool *pool, void mlx5hws_pool_chunk_free(struct mlx5hws_pool *pool, struct mlx5hws_pool_chunk *chunk); -static inline u32 -mlx5hws_pool_chunk_get_base_id(struct mlx5hws_pool *pool, - struct mlx5hws_pool_chunk *chunk) +static inline u32 mlx5hws_pool_get_base_id(struct mlx5hws_pool *pool) { - return pool->resource[chunk->resource_idx]->base_id; + return pool->resource->base_id; } -static inline u32 -mlx5hws_pool_chunk_get_base_mirror_id(struct mlx5hws_pool *pool, - struct mlx5hws_pool_chunk *chunk) +static inline u32 mlx5hws_pool_get_base_mirror_id(struct mlx5hws_pool *pool) { - return pool->mirror_resource[chunk->resource_idx]->base_id; + return pool->mirror_resource->base_id; +} + +static inline bool +mlx5hws_pool_empty(struct mlx5hws_pool *pool) +{ + bool ret; + + mutex_lock(&pool->lock); + ret = pool->available_elems == 0; + mutex_unlock(&pool->lock); + + return ret; +} + +static inline bool +mlx5hws_pool_full(struct mlx5hws_pool *pool) +{ + bool ret; + + mutex_lock(&pool->lock); + ret = pool->available_elems == (1 << pool->alloc_log_sz); + mutex_unlock(&pool->lock); + + return ret; } #endif /* MLX5HWS_POOL_H_ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c index a27a2d5ffc7b..5342a4cc7194 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c @@ -195,44 +195,30 @@ hws_rule_load_delete_info(struct mlx5hws_rule *rule, } } -static int hws_rule_alloc_action_ste(struct mlx5hws_rule *rule) +static int mlx5hws_rule_alloc_action_ste(struct mlx5hws_rule *rule, + u16 queue_id, bool skip_rx, + bool skip_tx) { struct mlx5hws_matcher *matcher = rule->matcher; - struct mlx5hws_matcher_action_ste *action_ste; - struct mlx5hws_pool_chunk ste = {0}; - int ret; - - action_ste = &matcher->action_ste; - ste.order = ilog2(roundup_pow_of_two(action_ste->max_stes)); - ret = mlx5hws_pool_chunk_alloc(action_ste->pool, 
&ste); - if (unlikely(ret)) { - mlx5hws_err(matcher->tbl->ctx, - "Failed to allocate STE for rule actions"); - return ret; - } - - rule->action_ste.pool = matcher->action_ste.pool; - rule->action_ste.num_stes = matcher->action_ste.max_stes; - rule->action_ste.index = ste.offset; + struct mlx5hws_context *ctx = matcher->tbl->ctx; - return 0; + rule->action_ste.ste.order = + ilog2(roundup_pow_of_two(matcher->num_of_action_stes)); + return mlx5hws_action_ste_chunk_alloc(&ctx->action_ste_pool[queue_id], + skip_rx, skip_tx, + &rule->action_ste); } -void mlx5hws_rule_free_action_ste(struct mlx5hws_rule_action_ste_info *action_ste) +void mlx5hws_rule_free_action_ste(struct mlx5hws_action_ste_chunk *action_ste) { - struct mlx5hws_pool_chunk ste = {0}; - - if (!action_ste->num_stes) + if (!action_ste->action_tbl) return; - ste.order = ilog2(roundup_pow_of_two(action_ste->num_stes)); - ste.offset = action_ste->index; - /* This release is safe only when the rule match STE was deleted * (when the rule is being deleted) or replaced with the new STE that * isn't pointing to old action STEs (when the rule is being updated). */ - mlx5hws_pool_chunk_free(action_ste->pool, &ste); + mlx5hws_action_ste_chunk_free(action_ste); } static void hws_rule_create_init(struct mlx5hws_rule *rule, @@ -250,22 +236,15 @@ static void hws_rule_create_init(struct mlx5hws_rule *rule, rule->rtc_0 = 0; rule->rtc_1 = 0; - rule->action_ste.pool = NULL; - rule->action_ste.num_stes = 0; - rule->action_ste.index = -1; - rule->status = MLX5HWS_RULE_STATUS_CREATING; } else { rule->status = MLX5HWS_RULE_STATUS_UPDATING; + /* Save the old action STE info so we can free it after writing + * new action STEs and a corresponding match STE. + */ + rule->old_action_ste = rule->action_ste; } - /* Initialize the old action STE info - shallow-copy action_ste. - * In create flow this will set old_action_ste fields to initial values. - * In update flow this will save the existing action STE info, - * so that we will later use it to free old STEs. - */ - rule->old_action_ste = rule->action_ste; - rule->pending_wqes = 0; /* Init default send STE attributes */ @@ -277,7 +256,6 @@ static void hws_rule_create_init(struct mlx5hws_rule *rule, /* Init default action apply */ apply->tbl_type = tbl->type; apply->common_res = &ctx->common_res; - apply->jump_to_action_stc = matcher->action_ste.stc.offset; apply->require_dep = 0; } @@ -353,17 +331,24 @@ static int hws_rule_create_hws(struct mlx5hws_rule *rule, if (action_stes) { /* Allocate action STEs for rules that need more than match STE */ - ret = hws_rule_alloc_action_ste(rule); + ret = mlx5hws_rule_alloc_action_ste(rule, attr->queue_id, + !!ste_attr.rtc_0, + !!ste_attr.rtc_1); if (ret) { mlx5hws_err(ctx, "Failed to allocate action memory %d", ret); mlx5hws_send_abort_new_dep_wqe(queue); return ret; } + apply.jump_to_action_stc = + rule->action_ste.action_tbl->stc.offset; /* Skip RX/TX based on the dep_wqe init */ - ste_attr.rtc_0 = dep_wqe->rtc_0 ? matcher->action_ste.rtc_0_id : 0; - ste_attr.rtc_1 = dep_wqe->rtc_1 ? matcher->action_ste.rtc_1_id : 0; + ste_attr.rtc_0 = dep_wqe->rtc_0 ? + rule->action_ste.action_tbl->rtc_0_id : 0; + ste_attr.rtc_1 = dep_wqe->rtc_1 ? 
+ rule->action_ste.action_tbl->rtc_1_id : 0; /* Action STEs are written to a specific index last to first */ - ste_attr.direct_index = rule->action_ste.index + action_stes; + ste_attr.direct_index = + rule->action_ste.ste.offset + action_stes; apply.next_direct_idx = ste_attr.direct_index; } else { apply.next_direct_idx = 0; @@ -670,6 +655,124 @@ int mlx5hws_rule_move_hws_add(struct mlx5hws_rule *rule, return 0; } +static u8 hws_rule_ethertype_to_matcher_ipv(u32 ethertype) +{ + switch (ethertype) { + case ETH_P_IP: + return MLX5HWS_MATCHER_IPV_4; + case ETH_P_IPV6: + return MLX5HWS_MATCHER_IPV_6; + default: + return MLX5HWS_MATCHER_IPV_UNSET; + } +} + +static u8 hws_rule_ip_version_to_matcher_ipv(u32 ip_version) +{ + switch (ip_version) { + case 4: + return MLX5HWS_MATCHER_IPV_4; + case 6: + return MLX5HWS_MATCHER_IPV_6; + default: + return MLX5HWS_MATCHER_IPV_UNSET; + } +} + +static int hws_rule_check_outer_ip_version(struct mlx5hws_matcher *matcher, + u32 *match_param) +{ + struct mlx5hws_context *ctx = matcher->tbl->ctx; + u8 outer_ipv_ether = MLX5HWS_MATCHER_IPV_UNSET; + u8 outer_ipv_ip = MLX5HWS_MATCHER_IPV_UNSET; + u8 outer_ipv, ver; + + if (matcher->matches_outer_ethertype) { + ver = MLX5_GET(fte_match_param, match_param, + outer_headers.ethertype); + outer_ipv_ether = hws_rule_ethertype_to_matcher_ipv(ver); + } + if (matcher->matches_outer_ip_version) { + ver = MLX5_GET(fte_match_param, match_param, + outer_headers.ip_version); + outer_ipv_ip = hws_rule_ip_version_to_matcher_ipv(ver); + } + + if (outer_ipv_ether != MLX5HWS_MATCHER_IPV_UNSET && + outer_ipv_ip != MLX5HWS_MATCHER_IPV_UNSET && + outer_ipv_ether != outer_ipv_ip) { + mlx5hws_err(ctx, "Rule matches on inconsistent outer ethertype and ip version\n"); + return -EINVAL; + } + + outer_ipv = outer_ipv_ether != MLX5HWS_MATCHER_IPV_UNSET ? + outer_ipv_ether : outer_ipv_ip; + if (outer_ipv != MLX5HWS_MATCHER_IPV_UNSET && + matcher->outer_ip_version != MLX5HWS_MATCHER_IPV_UNSET && + outer_ipv != matcher->outer_ip_version) { + mlx5hws_err(ctx, "Matcher and rule disagree on outer IP version\n"); + return -EINVAL; + } + matcher->outer_ip_version = outer_ipv; + + return 0; +} + +static int hws_rule_check_inner_ip_version(struct mlx5hws_matcher *matcher, + u32 *match_param) +{ + struct mlx5hws_context *ctx = matcher->tbl->ctx; + u8 inner_ipv_ether = MLX5HWS_MATCHER_IPV_UNSET; + u8 inner_ipv_ip = MLX5HWS_MATCHER_IPV_UNSET; + u8 inner_ipv, ver; + + if (matcher->matches_inner_ethertype) { + ver = MLX5_GET(fte_match_param, match_param, + inner_headers.ethertype); + inner_ipv_ether = hws_rule_ethertype_to_matcher_ipv(ver); + } + if (matcher->matches_inner_ip_version) { + ver = MLX5_GET(fte_match_param, match_param, + inner_headers.ip_version); + inner_ipv_ip = hws_rule_ip_version_to_matcher_ipv(ver); + } + + if (inner_ipv_ether != MLX5HWS_MATCHER_IPV_UNSET && + inner_ipv_ip != MLX5HWS_MATCHER_IPV_UNSET && + inner_ipv_ether != inner_ipv_ip) { + mlx5hws_err(ctx, "Rule matches on inconsistent inner ethertype and ip version\n"); + return -EINVAL; + } + + inner_ipv = inner_ipv_ether != MLX5HWS_MATCHER_IPV_UNSET ? 
+ inner_ipv_ether : inner_ipv_ip; + if (inner_ipv != MLX5HWS_MATCHER_IPV_UNSET && + matcher->inner_ip_version != MLX5HWS_MATCHER_IPV_UNSET && + inner_ipv != matcher->inner_ip_version) { + mlx5hws_err(ctx, "Matcher and rule disagree on inner IP version\n"); + return -EINVAL; + } + matcher->inner_ip_version = inner_ipv; + + return 0; +} + +static int hws_rule_check_ip_version(struct mlx5hws_matcher *matcher, + u32 *match_param) +{ + int ret; + + ret = hws_rule_check_outer_ip_version(matcher, match_param); + if (unlikely(ret)) + return ret; + + ret = hws_rule_check_inner_ip_version(matcher, match_param); + if (unlikely(ret)) + return ret; + + return 0; +} + int mlx5hws_rule_create(struct mlx5hws_matcher *matcher, u8 mt_idx, u32 *match_param, @@ -680,6 +783,10 @@ int mlx5hws_rule_create(struct mlx5hws_matcher *matcher, { int ret; + ret = hws_rule_check_ip_version(matcher, match_param); + if (unlikely(ret)) + return ret; + rule_handle->matcher = matcher; ret = hws_rule_enqueue_precheck_create(rule_handle, attr); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h index b5ee94ac449b..1c47a9c11572 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h @@ -43,12 +43,6 @@ struct mlx5hws_rule_match_tag { }; }; -struct mlx5hws_rule_action_ste_info { - struct mlx5hws_pool *pool; - int index; /* STE array index */ - u8 num_stes; -}; - struct mlx5hws_rule_resize_info { u32 rtc_0; u32 rtc_1; @@ -64,8 +58,8 @@ struct mlx5hws_rule { struct mlx5hws_rule_match_tag tag; struct mlx5hws_rule_resize_info *resize_info; }; - struct mlx5hws_rule_action_ste_info action_ste; - struct mlx5hws_rule_action_ste_info old_action_ste; + struct mlx5hws_action_ste_chunk action_ste; + struct mlx5hws_action_ste_chunk old_action_ste; u32 rtc_0; /* The RTC into which the STE was inserted */ u32 rtc_1; /* The RTC into which the STE was inserted */ u8 status; /* enum mlx5hws_rule_status */ @@ -75,7 +69,7 @@ struct mlx5hws_rule { */ }; -void mlx5hws_rule_free_action_ste(struct mlx5hws_rule_action_ste_info *action_ste); +void mlx5hws_rule_free_action_ste(struct mlx5hws_action_ste_chunk *action_ste); int mlx5hws_rule_move_hws_remove(struct mlx5hws_rule *rule, void *queue, void *user_data); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c index cb6abc4ab7df..c4b22be19a9b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c @@ -344,18 +344,133 @@ hws_send_engine_update_rule_resize(struct mlx5hws_send_engine *queue, } } +static void hws_send_engine_dump_error_cqe(struct mlx5hws_send_engine *queue, + struct mlx5hws_send_ring_priv *priv, + struct mlx5_cqe64 *cqe) +{ + u8 wqe_opcode = cqe ? be32_to_cpu(cqe->sop_drop_qpn) >> 24 : 0; + struct mlx5hws_context *ctx = priv->rule->matcher->tbl->ctx; + u32 opcode = cqe ? get_cqe_opcode(cqe) : 0; + struct mlx5hws_rule *rule = priv->rule; + + /* If something bad happens and lots of rules are failing, we don't + * want to pollute dmesg. Print only the first bad cqe per engine, + * the one that started the avalanche. 
+ */ + if (queue->error_cqe_printed) + return; + + queue->error_cqe_printed = true; + + if (mlx5hws_rule_move_in_progress(rule)) + mlx5hws_err(ctx, + "--- rule 0x%08llx: error completion moving rule: phase %s, wqes left %d\n", + HWS_PTR_TO_ID(rule), + rule->resize_info->state == + MLX5HWS_RULE_RESIZE_STATE_WRITING ? "WRITING" : + rule->resize_info->state == + MLX5HWS_RULE_RESIZE_STATE_DELETING ? "DELETING" : + "UNKNOWN", + rule->pending_wqes); + else + mlx5hws_err(ctx, + "--- rule 0x%08llx: error completion %s (%d), wqes left %d\n", + HWS_PTR_TO_ID(rule), + rule->status == + MLX5HWS_RULE_STATUS_CREATING ? "CREATING" : + rule->status == + MLX5HWS_RULE_STATUS_DELETING ? "DELETING" : + rule->status == + MLX5HWS_RULE_STATUS_FAILING ? "FAILING" : + rule->status == + MLX5HWS_RULE_STATUS_UPDATING ? "UPDATING" : "NA", + rule->status, + rule->pending_wqes); + + mlx5hws_err(ctx, " rule 0x%08llx: matcher 0x%llx %s\n", + HWS_PTR_TO_ID(rule), + HWS_PTR_TO_ID(rule->matcher), + (rule->matcher->flags & MLX5HWS_MATCHER_FLAGS_ISOLATED) ? + "(isolated)" : ""); + + if (!cqe) { + mlx5hws_err(ctx, " rule 0x%08llx: no CQE\n", + HWS_PTR_TO_ID(rule)); + return; + } + + mlx5hws_err(ctx, " rule 0x%08llx: cqe->opcode = %d %s\n", + HWS_PTR_TO_ID(rule), opcode, + opcode == MLX5_CQE_REQ ? "(MLX5_CQE_REQ)" : + opcode == MLX5_CQE_REQ_ERR ? "(MLX5_CQE_REQ_ERR)" : " "); + + if (opcode == MLX5_CQE_REQ_ERR) { + struct mlx5_err_cqe *err_cqe = (struct mlx5_err_cqe *)cqe; + + mlx5hws_err(ctx, + " rule 0x%08llx: |--- hw_error_syndrome = 0x%x\n", + HWS_PTR_TO_ID(rule), + err_cqe->rsvd1[16]); + mlx5hws_err(ctx, + " rule 0x%08llx: |--- hw_syndrome_type = 0x%x\n", + HWS_PTR_TO_ID(rule), + err_cqe->rsvd1[17] >> 4); + mlx5hws_err(ctx, + " rule 0x%08llx: |--- vendor_err_synd = 0x%x\n", + HWS_PTR_TO_ID(rule), + err_cqe->vendor_err_synd); + mlx5hws_err(ctx, + " rule 0x%08llx: |--- syndrome = 0x%x\n", + HWS_PTR_TO_ID(rule), + err_cqe->syndrome); + } + + mlx5hws_err(ctx, + " rule 0x%08llx: cqe->byte_cnt = 0x%08x\n", + HWS_PTR_TO_ID(rule), be32_to_cpu(cqe->byte_cnt)); + mlx5hws_err(ctx, + " rule 0x%08llx: |-- UPDATE STATUS = %s\n", + HWS_PTR_TO_ID(rule), + (be32_to_cpu(cqe->byte_cnt) & 0x80000000) ? + "FAILURE" : "SUCCESS"); + mlx5hws_err(ctx, + " rule 0x%08llx: |------- SYNDROME = %s\n", + HWS_PTR_TO_ID(rule), + ((be32_to_cpu(cqe->byte_cnt) & 0x00000003) == 1) ? + "SET_FLOW_FAIL" : + ((be32_to_cpu(cqe->byte_cnt) & 0x00000003) == 2) ? + "DISABLE_FLOW_FAIL" : "UNKNOWN"); + mlx5hws_err(ctx, + " rule 0x%08llx: cqe->sop_drop_qpn = 0x%08x\n", + HWS_PTR_TO_ID(rule), be32_to_cpu(cqe->sop_drop_qpn)); + mlx5hws_err(ctx, + " rule 0x%08llx: |-send wqe opcode = 0x%02x %s\n", + HWS_PTR_TO_ID(rule), wqe_opcode, + wqe_opcode == MLX5HWS_WQE_OPCODE_TBL_ACCESS ? + "(MLX5HWS_WQE_OPCODE_TBL_ACCESS)" : "(UNKNOWN)"); + mlx5hws_err(ctx, + " rule 0x%08llx: |------------ qpn = 0x%06x\n", + HWS_PTR_TO_ID(rule), + be32_to_cpu(cqe->sop_drop_qpn) & 0xffffff); +} + static void hws_send_engine_update_rule(struct mlx5hws_send_engine *queue, struct mlx5hws_send_ring_priv *priv, u16 wqe_cnt, - enum mlx5hws_flow_op_status *status) + enum mlx5hws_flow_op_status *status, + struct mlx5_cqe64 *cqe) { priv->rule->pending_wqes--; - if (*status == MLX5HWS_FLOW_OP_ERROR) { + if (unlikely(*status == MLX5HWS_FLOW_OP_ERROR)) { if (priv->retry_id) { + /* If there is a retry_id, then it's not an error yet, + * retry to insert this rule in the collision RTC. 
+ */ hws_send_engine_retry_post_send(queue, priv, wqe_cnt); return; } + hws_send_engine_dump_error_cqe(queue, priv, cqe); /* Some part of the rule failed */ priv->rule->status = MLX5HWS_RULE_STATUS_FAILING; *priv->used_id = 0; @@ -420,7 +535,8 @@ static void hws_send_engine_update(struct mlx5hws_send_engine *queue, if (priv->user_data) { if (priv->rule) { - hws_send_engine_update_rule(queue, priv, wqe_cnt, &status); + hws_send_engine_update_rule(queue, priv, wqe_cnt, + &status, cqe); /* Completion is provided on the last rule WQE */ if (priv->rule->pending_wqes) return; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h index f833092235c1..3fb8e99309b2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h @@ -140,6 +140,7 @@ struct mlx5hws_send_engine { u16 used_entries; u16 num_entries; bool err; + bool error_cqe_printed; struct mutex lock; /* Protects the send engine */ }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c index ab1297531232..568f691733f3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c @@ -342,10 +342,10 @@ int mlx5hws_table_ft_set_next_rtc(struct mlx5hws_context *ctx, return mlx5hws_cmd_flow_table_modify(ctx->mdev, &ft_attr, ft_id); } -static int hws_table_ft_set_next_ft(struct mlx5hws_context *ctx, - u32 ft_id, - u32 fw_ft_type, - u32 next_ft_id) +int mlx5hws_table_ft_set_next_ft(struct mlx5hws_context *ctx, + u32 ft_id, + u32 fw_ft_type, + u32 next_ft_id) { struct mlx5hws_cmd_ft_modify_attr ft_attr = {0}; @@ -389,10 +389,10 @@ int mlx5hws_table_connect_to_miss_table(struct mlx5hws_table *src_tbl, if (dst_tbl) { if (list_empty(&dst_tbl->matchers_list)) { /* Connect src_tbl last_ft to dst_tbl start anchor */ - ret = hws_table_ft_set_next_ft(src_tbl->ctx, - last_ft_id, - src_tbl->fw_ft_type, - dst_tbl->ft_id); + ret = mlx5hws_table_ft_set_next_ft(src_tbl->ctx, + last_ft_id, + src_tbl->fw_ft_type, + dst_tbl->ft_id); if (ret) return ret; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h index dd50420eec9e..0400cce0c317 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h @@ -65,4 +65,9 @@ int mlx5hws_table_ft_set_next_rtc(struct mlx5hws_context *ctx, u32 rtc_0_id, u32 rtc_1_id); +int mlx5hws_table_ft_set_next_ft(struct mlx5hws_context *ctx, + u32 ft_id, + u32 fw_ft_type, + u32 next_ft_id); + #endif /* MLX5HWS_TABLE_H_ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c index 8007d3f523c9..f367997ab61e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c @@ -833,15 +833,21 @@ static u32 mlx5_cmd_dr_get_capabilities(struct mlx5_flow_root_namespace *ns, return steering_caps; } -int mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat) +int +mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat, + u32 *reformat_id) { + struct mlx5dr_action *dr_action; + switch (pkt_reformat->reformat_type) { case MLX5_REFORMAT_TYPE_L2_TO_VXLAN: case 
MLX5_REFORMAT_TYPE_L2_TO_NVGRE: case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL: case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL: case MLX5_REFORMAT_TYPE_INSERT_HDR: - return mlx5dr_action_get_pkt_reformat_id(pkt_reformat->fs_dr_action.dr_action); + dr_action = pkt_reformat->fs_dr_action.dr_action; + *reformat_id = mlx5dr_action_get_pkt_reformat_id(dr_action); + return 0; } return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.h index 99a3b2eff6b8..f869f2daefbf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.h @@ -38,7 +38,9 @@ struct mlx5_fs_dr_table { bool mlx5_fs_dr_is_supported(struct mlx5_core_dev *dev); -int mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat); +int +mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat, + u32 *reformat_id); const struct mlx5_flow_cmds *mlx5_fs_cmd_get_dr_cmds(void); @@ -49,9 +51,11 @@ static inline const struct mlx5_flow_cmds *mlx5_fs_cmd_get_dr_cmds(void) return NULL; } -static inline u32 mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat) +static inline int +mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat, + u32 *reformat_id) { - return 0; + return -EOPNOTSUPP; } static inline bool mlx5_fs_dr_is_supported(struct mlx5_core_dev *dev) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index d10d4c396040..da5c24fc7b30 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -465,19 +465,22 @@ int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid) { u32 *out; int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out); + int err; out = kvzalloc(outlen, GFP_KERNEL); if (!out) return -ENOMEM; - mlx5_query_nic_vport_context(mdev, 0, out); + err = mlx5_query_nic_vport_context(mdev, 0, out); + if (err) + goto out; *node_guid = MLX5_GET64(query_nic_vport_context_out, out, nic_vport_context.node_guid); - +out: kvfree(out); - return 0; + return err; } EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid); @@ -519,19 +522,22 @@ int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev, { u32 *out; int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out); + int err; out = kvzalloc(outlen, GFP_KERNEL); if (!out) return -ENOMEM; - mlx5_query_nic_vport_context(mdev, 0, out); + err = mlx5_query_nic_vport_context(mdev, 0, out); + if (err) + goto out; *qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out, nic_vport_context.qkey_violation_counter); - +out: kvfree(out); - return 0; + return err; } EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c index e746cd9c68ed..eac9a14a6058 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c @@ -205,11 +205,11 @@ static int mlxsw_thermal_get_temp(struct thermal_zone_device *tzdev, return 0; } -static struct thermal_zone_params mlxsw_thermal_params = { +static const struct thermal_zone_params mlxsw_thermal_params = { .no_hwmon = true, }; -static struct thermal_zone_device_ops mlxsw_thermal_ops = { +static const struct thermal_zone_device_ops mlxsw_thermal_ops = { .should_bind = mlxsw_thermal_should_bind, .get_temp 
= mlxsw_thermal_get_temp, }; @@ -252,7 +252,7 @@ static int mlxsw_thermal_module_temp_get(struct thermal_zone_device *tzdev, return 0; } -static struct thermal_zone_device_ops mlxsw_thermal_module_ops = { +static const struct thermal_zone_device_ops mlxsw_thermal_module_ops = { .should_bind = mlxsw_thermal_module_should_bind, .get_temp = mlxsw_thermal_module_temp_get, }; @@ -280,7 +280,7 @@ static int mlxsw_thermal_gearbox_temp_get(struct thermal_zone_device *tzdev, return 0; } -static struct thermal_zone_device_ops mlxsw_thermal_gearbox_ops = { +static const struct thermal_zone_device_ops mlxsw_thermal_gearbox_ops = { .should_bind = mlxsw_thermal_module_should_bind, .get_temp = mlxsw_thermal_gearbox_temp_get, }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 3080ea032e7f..618957d65663 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1159,63 +1159,31 @@ static int mlxsw_sp_set_features(struct net_device *dev, return 0; } -static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port, - struct ifreq *ifr) +static int mlxsw_sp_port_hwtstamp_set(struct net_device *dev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) { - struct hwtstamp_config config; - int err; - - if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) - return -EFAULT; - - err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, - &config); - if (err) - return err; - - if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) - return -EFAULT; + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); - return 0; + return mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, + config, extack); } -static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port, - struct ifreq *ifr) +static int mlxsw_sp_port_hwtstamp_get(struct net_device *dev, + struct kernel_hwtstamp_config *config) { - struct hwtstamp_config config; - int err; - - err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port, - &config); - if (err) - return err; - - if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) - return -EFAULT; + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); - return 0; + return mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port, + config); } static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port) { - struct hwtstamp_config config = {0}; - - mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config); -} - -static int -mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) -{ - struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct kernel_hwtstamp_config config = {}; - switch (cmd) { - case SIOCSHWTSTAMP: - return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr); - case SIOCGHWTSTAMP: - return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr); - default: - return -EOPNOTSUPP; - } + mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config, + NULL); } static const struct net_device_ops mlxsw_sp_port_netdev_ops = { @@ -1232,7 +1200,8 @@ static const struct net_device_ops mlxsw_sp_port_netdev_ops = { .ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid, .ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid, .ndo_set_features = mlxsw_sp_set_features, - .ndo_eth_ioctl = mlxsw_sp_port_ioctl, + .ndo_hwtstamp_get = mlxsw_sp_port_hwtstamp_get, + .ndo_hwtstamp_set = mlxsw_sp_port_hwtstamp_set, }; static int diff --git 
a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 37cd1d002b3b..b03ff9e044f9 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -233,9 +233,10 @@ struct mlxsw_sp_ptp_ops { u16 local_port); int (*hwtstamp_get)(struct mlxsw_sp_port *mlxsw_sp_port, - struct hwtstamp_config *config); + struct kernel_hwtstamp_config *config); int (*hwtstamp_set)(struct mlxsw_sp_port *mlxsw_sp_port, - struct hwtstamp_config *config); + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack); void (*shaper_work)(struct work_struct *work); int (*get_ts_info)(struct mlxsw_sp *mlxsw_sp, struct kernel_ethtool_ts_info *info); @@ -351,7 +352,7 @@ struct mlxsw_sp_port { struct mlxsw_sp_flow_block *eg_flow_block; struct { struct delayed_work shaper_dw; - struct hwtstamp_config hwtstamp_config; + struct kernel_hwtstamp_config hwtstamp_config; u16 ing_types; u16 egr_types; struct mlxsw_sp_ptp_port_stats stats; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c index 3f64cdbabfa3..0a8fb9c842d3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c @@ -262,7 +262,7 @@ err_port_pause_configure: } struct mlxsw_sp_port_hw_stats { - char str[ETH_GSTRING_LEN]; + char str[ETH_GSTRING_LEN] __nonstring; u64 (*getter)(const char *payload); bool cells_bytes; }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c index ca8b9d18fbb9..e8182dd76c7d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c @@ -46,7 +46,7 @@ struct mlxsw_sp2_ptp_state { refcount_t ptp_port_enabled_ref; /* Number of ports with time stamping * enabled. */ - struct hwtstamp_config config; + struct kernel_hwtstamp_config config; struct mutex lock; /* Protects 'config' and HW configuration. */ }; @@ -1083,14 +1083,14 @@ void mlxsw_sp1_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state_common) } int mlxsw_sp1_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port, - struct hwtstamp_config *config) + struct kernel_hwtstamp_config *config) { *config = mlxsw_sp_port->ptp.hwtstamp_config; return 0; } static int -mlxsw_sp1_ptp_get_message_types(const struct hwtstamp_config *config, +mlxsw_sp1_ptp_get_message_types(const struct kernel_hwtstamp_config *config, u16 *p_ing_types, u16 *p_egr_types, enum hwtstamp_rx_filters *p_rx_filter) { @@ -1246,7 +1246,8 @@ void mlxsw_sp1_ptp_shaper_work(struct work_struct *work) } int mlxsw_sp1_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port, - struct hwtstamp_config *config) + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) { enum hwtstamp_rx_filters rx_filter; u16 ing_types; @@ -1270,7 +1271,7 @@ int mlxsw_sp1_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port, if (err) return err; - /* Notify the ioctl caller what we are actually timestamping. */ + /* Notify the caller what we are actually timestamping. 
*/ config->rx_filter = rx_filter; return 0; @@ -1451,7 +1452,7 @@ void mlxsw_sp2_ptp_transmitted(struct mlxsw_sp *mlxsw_sp, } int mlxsw_sp2_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port, - struct hwtstamp_config *config) + struct kernel_hwtstamp_config *config) { struct mlxsw_sp2_ptp_state *ptp_state; @@ -1465,7 +1466,7 @@ int mlxsw_sp2_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port, } static int -mlxsw_sp2_ptp_get_message_types(const struct hwtstamp_config *config, +mlxsw_sp2_ptp_get_message_types(const struct kernel_hwtstamp_config *config, u16 *p_ing_types, u16 *p_egr_types, enum hwtstamp_rx_filters *p_rx_filter) { @@ -1542,7 +1543,7 @@ static int mlxsw_sp2_ptp_mtpcpc_set(struct mlxsw_sp *mlxsw_sp, bool ptp_trap_en, static int mlxsw_sp2_ptp_enable(struct mlxsw_sp *mlxsw_sp, u16 ing_types, u16 egr_types, - struct hwtstamp_config new_config) + struct kernel_hwtstamp_config new_config) { struct mlxsw_sp2_ptp_state *ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp); int err; @@ -1556,7 +1557,7 @@ static int mlxsw_sp2_ptp_enable(struct mlxsw_sp *mlxsw_sp, u16 ing_types, } static int mlxsw_sp2_ptp_disable(struct mlxsw_sp *mlxsw_sp, - struct hwtstamp_config new_config) + struct kernel_hwtstamp_config new_config) { struct mlxsw_sp2_ptp_state *ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp); int err; @@ -1571,7 +1572,7 @@ static int mlxsw_sp2_ptp_disable(struct mlxsw_sp *mlxsw_sp, static int mlxsw_sp2_ptp_configure_port(struct mlxsw_sp_port *mlxsw_sp_port, u16 ing_types, u16 egr_types, - struct hwtstamp_config new_config) + struct kernel_hwtstamp_config new_config) { struct mlxsw_sp2_ptp_state *ptp_state; int err; @@ -1592,7 +1593,7 @@ static int mlxsw_sp2_ptp_configure_port(struct mlxsw_sp_port *mlxsw_sp_port, } static int mlxsw_sp2_ptp_deconfigure_port(struct mlxsw_sp_port *mlxsw_sp_port, - struct hwtstamp_config new_config) + struct kernel_hwtstamp_config new_config) { struct mlxsw_sp2_ptp_state *ptp_state; int err; @@ -1614,11 +1615,12 @@ err_ptp_disable: } int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port, - struct hwtstamp_config *config) + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) { + struct kernel_hwtstamp_config new_config; struct mlxsw_sp2_ptp_state *ptp_state; enum hwtstamp_rx_filters rx_filter; - struct hwtstamp_config new_config; u16 new_ing_types, new_egr_types; bool ptp_enabled; int err; @@ -1652,7 +1654,7 @@ int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port, mlxsw_sp_port->ptp.ing_types = new_ing_types; mlxsw_sp_port->ptp.egr_types = new_egr_types; - /* Notify the ioctl caller what we are actually timestamping. */ + /* Notify the caller what we are actually timestamping. 
*/ config->rx_filter = rx_filter; mutex_unlock(&ptp_state->lock); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h index 102db9060135..df37f1470830 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h @@ -34,10 +34,11 @@ void mlxsw_sp1_ptp_got_timestamp(struct mlxsw_sp *mlxsw_sp, bool ingress, u64 timestamp); int mlxsw_sp1_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port, - struct hwtstamp_config *config); + struct kernel_hwtstamp_config *config); int mlxsw_sp1_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port, - struct hwtstamp_config *config); + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack); void mlxsw_sp1_ptp_shaper_work(struct work_struct *work); @@ -65,10 +66,11 @@ void mlxsw_sp2_ptp_transmitted(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, u16 local_port); int mlxsw_sp2_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port, - struct hwtstamp_config *config); + struct kernel_hwtstamp_config *config); int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port, - struct hwtstamp_config *config); + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack); int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp, struct kernel_ethtool_ts_info *info); @@ -117,14 +119,15 @@ mlxsw_sp1_ptp_got_timestamp(struct mlxsw_sp *mlxsw_sp, bool ingress, static inline int mlxsw_sp1_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port, - struct hwtstamp_config *config) + struct kernel_hwtstamp_config *config) { return -EOPNOTSUPP; } static inline int mlxsw_sp1_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port, - struct hwtstamp_config *config) + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) { return -EOPNOTSUPP; } @@ -181,14 +184,15 @@ static inline void mlxsw_sp2_ptp_transmitted(struct mlxsw_sp *mlxsw_sp, static inline int mlxsw_sp2_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port, - struct hwtstamp_config *config) + struct kernel_hwtstamp_config *config) { return -EOPNOTSUPP; } static inline int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port, - struct hwtstamp_config *config) + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) { return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/meta/Kconfig b/drivers/net/ethernet/meta/Kconfig index 831921b9d4d5..3ba527514f1e 100644 --- a/drivers/net/ethernet/meta/Kconfig +++ b/drivers/net/ethernet/meta/Kconfig @@ -27,6 +27,7 @@ config FBNIC select NET_DEVLINK select PAGE_POOL select PHYLINK + select PLDMFW help This driver supports Meta Platforms Host Network Interface. 
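Aside (editorial, not part of the patch): the mlxsw hunks above drop the SIOCSHWTSTAMP/SIOCGHWTSTAMP ioctl plumbing in favour of the ndo_hwtstamp_get/ndo_hwtstamp_set net_device_ops callbacks, which receive a struct kernel_hwtstamp_config directly and report failures through netlink extack. A minimal sketch of that callback shape follows; the foo_priv/foo_hwtstamp_*/foo_netdev_ops names are hypothetical and only illustrate the pattern, they do not appear in this diff.

/* Illustrative sketch only -- hypothetical driver, not part of the diff. */
#include <linux/errno.h>
#include <linux/net_tstamp.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>

struct foo_priv {
	struct kernel_hwtstamp_config hwtstamp_config; /* last applied config */
};

static int foo_hwtstamp_get(struct net_device *dev,
			    struct kernel_hwtstamp_config *config)
{
	struct foo_priv *priv = netdev_priv(dev);

	/* No ifreq or copy_to_user() here; the core copies the result out. */
	*config = priv->hwtstamp_config;
	return 0;
}

static int foo_hwtstamp_set(struct net_device *dev,
			    struct kernel_hwtstamp_config *config,
			    struct netlink_ext_ack *extack)
{
	struct foo_priv *priv = netdev_priv(dev);

	if (config->tx_type != HWTSTAMP_TX_OFF &&
	    config->tx_type != HWTSTAMP_TX_ON) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported TX timestamping mode");
		return -ERANGE;
	}

	/* Program the hardware here; config->rx_filter may be coerced to
	 * whatever the device actually timestamps before returning, just as
	 * the mlxsw hwtstamp_set handlers above rewrite rx_filter.
	 */
	priv->hwtstamp_config = *config;
	return 0;
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_hwtstamp_get = foo_hwtstamp_get,
	.ndo_hwtstamp_set = foo_hwtstamp_set,
};

With these callbacks wired up, the core handles both the legacy ioctl path and the netlink path, so the driver no longer needs a custom ndo_eth_ioctl handler for timestamping, which is why mlxsw_sp_port_ioctl is removed above.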
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic.h b/drivers/net/ethernet/meta/fbnic/fbnic.h index de6b1a340f55..65815d4f379e 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic.h +++ b/drivers/net/ethernet/meta/fbnic/fbnic.h @@ -19,6 +19,7 @@ struct fbnic_napi_vector; #define FBNIC_MAX_NAPI_VECTORS 128u +#define FBNIC_MBX_CMPL_SLOTS 4 struct fbnic_dev { struct device *dev; @@ -42,7 +43,7 @@ struct fbnic_dev { struct fbnic_fw_mbx mbx[FBNIC_IPC_MBX_INDICES]; struct fbnic_fw_cap fw_cap; - struct fbnic_fw_completion *cmpl_data; + struct fbnic_fw_completion *cmpl_data[FBNIC_MBX_CMPL_SLOTS]; /* Lock protecting Tx Mailbox queue to prevent possible races */ spinlock_t fw_tx_lock; @@ -81,6 +82,9 @@ struct fbnic_dev { /* Local copy of hardware statistics */ struct fbnic_hw_stats hw_stats; + + /* Lock protecting access to hw_stats */ + spinlock_t hw_stats_lock; }; /* Reserve entry 0 in the MSI-X "others" array until we have filled all diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h index 51bee8072420..36393a17d92d 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h +++ b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h @@ -397,6 +397,15 @@ enum { #define FBNIC_TCE_DROP_CTRL_TTI_FRM_DROP_EN CSR_BIT(1) #define FBNIC_TCE_DROP_CTRL_TTI_TBI_DROP_EN CSR_BIT(2) +#define FBNIC_TCE_TTI_CM_DROP_PKTS 0x0403e /* 0x100f8 */ +#define FBNIC_TCE_TTI_CM_DROP_BYTE_L 0x0403f /* 0x100fc */ +#define FBNIC_TCE_TTI_CM_DROP_BYTE_H 0x04040 /* 0x10100 */ +#define FBNIC_TCE_TTI_FRAME_DROP_PKTS 0x04041 /* 0x10104 */ +#define FBNIC_TCE_TTI_FRAME_DROP_BYTE_L 0x04042 /* 0x10108 */ +#define FBNIC_TCE_TTI_FRAME_DROP_BYTE_H 0x04043 /* 0x1010c */ +#define FBNIC_TCE_TBI_DROP_PKTS 0x04044 /* 0x10110 */ +#define FBNIC_TCE_TBI_DROP_BYTE_L 0x04045 /* 0x10114 */ + #define FBNIC_TCE_TCAM_IDX2DEST_MAP 0x0404A /* 0x10128 */ #define FBNIC_TCE_TCAM_IDX2DEST_MAP_DEST_ID_0 CSR_GENMASK(3, 0) enum { @@ -432,6 +441,11 @@ enum { #define FBNIC_TMI_SOP_PROT_CTRL 0x04400 /* 0x11000 */ #define FBNIC_TMI_DROP_CTRL 0x04401 /* 0x11004 */ #define FBNIC_TMI_DROP_CTRL_EN CSR_BIT(0) +#define FBNIC_TMI_DROP_PKTS 0x04402 /* 0x11008 */ +#define FBNIC_TMI_DROP_BYTE_L 0x04403 /* 0x1100c */ +#define FBNIC_TMI_ILLEGAL_PTP_REQS 0x04409 /* 0x11024 */ +#define FBNIC_TMI_GOOD_PTP_TS 0x0440a /* 0x11028 */ +#define FBNIC_TMI_BAD_PTP_TS 0x0440b /* 0x1102c */ #define FBNIC_CSR_END_TMI 0x0443f /* CSR section delimiter */ /* Precision Time Protocol Registers */ @@ -485,6 +499,14 @@ enum { FBNIC_RXB_FIFO_INDICES = 8 }; +enum { + FBNIC_RXB_INTF_NET = 0, + FBNIC_RXB_INTF_RBT = 1, + /* Unused */ + /* Unused */ + FBNIC_RXB_INTF_INDICES = 4 +}; + #define FBNIC_RXB_CT_SIZE(n) (0x08000 + (n)) /* 0x20000 + 4*n */ #define FBNIC_RXB_CT_SIZE_CNT 8 #define FBNIC_RXB_CT_SIZE_HEADER CSR_GENMASK(5, 0) @@ -866,6 +888,12 @@ enum { #define FBNIC_QUEUE_TWQ1_BAL 0x022 /* 0x088 */ #define FBNIC_QUEUE_TWQ1_BAH 0x023 /* 0x08c */ +/* Tx Work Queue Statistics Registers */ +#define FBNIC_QUEUE_TWQ0_PKT_CNT 0x062 /* 0x188 */ +#define FBNIC_QUEUE_TWQ0_ERR_CNT 0x063 /* 0x18c */ +#define FBNIC_QUEUE_TWQ1_PKT_CNT 0x072 /* 0x1c8 */ +#define FBNIC_QUEUE_TWQ1_ERR_CNT 0x073 /* 0x1cc */ + /* Tx Completion Queue Registers */ #define FBNIC_QUEUE_TCQ_CTL 0x080 /* 0x200 */ #define FBNIC_QUEUE_TCQ_CTL_RESET CSR_BIT(0) @@ -955,6 +983,12 @@ enum { FBNIC_QUEUE_RDE_CTL1_PAYLD_PACK_RSS = 2, }; +/* Rx Per CQ Statistics Counters */ +#define FBNIC_QUEUE_RDE_PKT_CNT 0x2a2 /* 0xa88 */ +#define FBNIC_QUEUE_RDE_PKT_ERR_CNT 0x2a3 /* 0xa8c */ +#define 
FBNIC_QUEUE_RDE_CQ_DROP_CNT 0x2a4 /* 0xa90 */ +#define FBNIC_QUEUE_RDE_BDQ_DROP_CNT 0x2a5 /* 0xa94 */ + /* Rx Interrupt Manager Registers */ #define FBNIC_QUEUE_RIM_CTL 0x2c0 /* 0xb00 */ #define FBNIC_QUEUE_RIM_CTL_MSIX_MASK CSR_GENMASK(7, 0) diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c b/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c index 0072d612215e..4c4938eedd7b 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c @@ -3,10 +3,12 @@ #include <linux/unaligned.h> #include <linux/pci.h> +#include <linux/pldmfw.h> #include <linux/types.h> #include <net/devlink.h> #include "fbnic.h" +#include "fbnic_tlv.h" #define FBNIC_SN_STR_LEN 24 @@ -109,8 +111,262 @@ static int fbnic_devlink_info_get(struct devlink *devlink, return 0; } +static bool +fbnic_pldm_match_record(struct pldmfw *context, struct pldmfw_record *record) +{ + struct pldmfw_desc_tlv *desc; + u32 anti_rollback_ver = 0; + struct devlink *devlink; + struct fbnic_dev *fbd; + struct pci_dev *pdev; + + /* First, use the standard PCI matching function */ + if (!pldmfw_op_pci_match_record(context, record)) + return false; + + pdev = to_pci_dev(context->dev); + fbd = pci_get_drvdata(pdev); + devlink = priv_to_devlink(fbd); + + /* If PCI match is successful, check for vendor-specific descriptors */ + list_for_each_entry(desc, &record->descs, entry) { + if (desc->type != PLDM_DESC_ID_VENDOR_DEFINED) + continue; + + if (desc->size < 21 || desc->data[0] != 1 || + desc->data[1] != 15) + continue; + + if (memcmp(desc->data + 2, "AntiRollbackVer", 15) != 0) + continue; + + anti_rollback_ver = get_unaligned_le32(desc->data + 17); + break; + } + + /* Compare versions and return error if they do not match */ + if (anti_rollback_ver < fbd->fw_cap.anti_rollback_version) { + char buf[128]; + + snprintf(buf, sizeof(buf), + "New firmware anti-rollback version (0x%x) is older than device version (0x%x)!", + anti_rollback_ver, fbd->fw_cap.anti_rollback_version); + devlink_flash_update_status_notify(devlink, buf, + "Anti-Rollback", 0, 0); + + return false; + } + + return true; +} + +static int +fbnic_flash_start(struct fbnic_dev *fbd, struct pldmfw_component *component) +{ + struct fbnic_fw_completion *cmpl; + int err; + + cmpl = fbnic_fw_alloc_cmpl(FBNIC_TLV_MSG_ID_FW_START_UPGRADE_REQ); + if (!cmpl) + return -ENOMEM; + + err = fbnic_fw_xmit_fw_start_upgrade(fbd, cmpl, + component->identifier, + component->component_size); + if (err) + goto cmpl_free; + + /* Wait for firmware to ack firmware upgrade start */ + if (wait_for_completion_timeout(&cmpl->done, 10 * HZ)) + err = cmpl->result; + else + err = -ETIMEDOUT; + + fbnic_fw_clear_cmpl(fbd, cmpl); +cmpl_free: + fbnic_fw_put_cmpl(cmpl); + + return err; +} + +static int +fbnic_flash_component(struct pldmfw *context, + struct pldmfw_component *component) +{ + const u8 *data = component->component_data; + const u32 size = component->component_size; + struct fbnic_fw_completion *cmpl; + const char *component_name; + struct devlink *devlink; + struct fbnic_dev *fbd; + struct pci_dev *pdev; + u32 offset = 0; + u32 length = 0; + char buf[32]; + int err; + + pdev = to_pci_dev(context->dev); + fbd = pci_get_drvdata(pdev); + devlink = priv_to_devlink(fbd); + + switch (component->identifier) { + case QSPI_SECTION_CMRT: + component_name = "boot1"; + break; + case QSPI_SECTION_CONTROL_FW: + component_name = "boot2"; + break; + case QSPI_SECTION_OPTION_ROM: + component_name = "option-rom"; + break; + default: + snprintf(buf, sizeof(buf), 
"Unknown component ID %u!", + component->identifier); + devlink_flash_update_status_notify(devlink, buf, NULL, 0, + size); + return -EINVAL; + } + + /* Once firmware receives the request to start upgrading it responds + * with two messages: + * 1. An ACK that it received the message and possible error code + * indicating that an upgrade is not currently possible. + * 2. A request for the first chunk of data + * + * Setup completions for write before issuing the start message so + * the driver can catch both messages. + */ + cmpl = fbnic_fw_alloc_cmpl(FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_REQ); + if (!cmpl) + return -ENOMEM; + + err = fbnic_mbx_set_cmpl(fbd, cmpl); + if (err) + goto cmpl_free; + + devlink_flash_update_timeout_notify(devlink, "Initializing", + component_name, 15); + err = fbnic_flash_start(fbd, component); + if (err) + goto err_no_msg; + + while (offset < size) { + if (!wait_for_completion_timeout(&cmpl->done, 15 * HZ)) { + err = -ETIMEDOUT; + break; + } + + err = cmpl->result; + if (err) + break; + + /* Verify firmware is requesting the next chunk in the seq. */ + if (cmpl->u.fw_update.offset != offset + length) { + err = -EFAULT; + break; + } + + offset = cmpl->u.fw_update.offset; + length = cmpl->u.fw_update.length; + + if (length > TLV_MAX_DATA || offset + length > size) { + err = -EFAULT; + break; + } + + devlink_flash_update_status_notify(devlink, "Flashing", + component_name, + offset, size); + + /* Mailbox will set length to 0 once it receives the finish + * message. + */ + if (!length) + continue; + + reinit_completion(&cmpl->done); + err = fbnic_fw_xmit_fw_write_chunk(fbd, data, offset, length, + 0); + if (err) + break; + } + + if (err) { + fbnic_fw_xmit_fw_write_chunk(fbd, NULL, 0, 0, err); +err_no_msg: + snprintf(buf, sizeof(buf), "Mailbox encountered error %d!", + err); + devlink_flash_update_status_notify(devlink, buf, + component_name, 0, 0); + } + + fbnic_fw_clear_cmpl(fbd, cmpl); +cmpl_free: + fbnic_fw_put_cmpl(cmpl); + + return err; +} + +static const struct pldmfw_ops fbnic_pldmfw_ops = { + .match_record = fbnic_pldm_match_record, + .flash_component = fbnic_flash_component, +}; + +static int +fbnic_devlink_flash_update(struct devlink *devlink, + struct devlink_flash_update_params *params, + struct netlink_ext_ack *extack) +{ + struct fbnic_dev *fbd = devlink_priv(devlink); + const struct firmware *fw = params->fw; + struct device *dev = fbd->dev; + struct pldmfw context; + char *err_msg; + int err; + + context.ops = &fbnic_pldmfw_ops; + context.dev = dev; + + err = pldmfw_flash_image(&context, fw); + if (err) { + switch (err) { + case -EINVAL: + err_msg = "Invalid image"; + break; + case -EOPNOTSUPP: + err_msg = "Unsupported image"; + break; + case -ENOMEM: + err_msg = "Out of memory"; + break; + case -EFAULT: + err_msg = "Invalid header"; + break; + case -ENOENT: + err_msg = "No matching record"; + break; + case -ENODEV: + err_msg = "No matching device"; + break; + case -ETIMEDOUT: + err_msg = "Timed out waiting for reply"; + break; + default: + err_msg = "Unknown error"; + break; + } + + NL_SET_ERR_MSG_FMT_MOD(extack, + "Failed to flash PLDM Image: %s (error: %d)", + err_msg, err); + } + + return err; +} + static const struct devlink_ops fbnic_devlink_ops = { - .info_get = fbnic_devlink_info_get, + .info_get = fbnic_devlink_info_get, + .flash_update = fbnic_devlink_flash_update, }; void fbnic_devlink_free(struct fbnic_dev *fbd) diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c index 
0a751a2aaf73..5c7556c8c4c5 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c @@ -27,6 +27,19 @@ struct fbnic_stat { FBNIC_STAT_FIELDS(fbnic_hw_stats, name, stat) static const struct fbnic_stat fbnic_gstrings_hw_stats[] = { + /* TTI */ + FBNIC_HW_STAT("tti_cm_drop_frames", tti.cm_drop.frames), + FBNIC_HW_STAT("tti_cm_drop_bytes", tti.cm_drop.bytes), + FBNIC_HW_STAT("tti_frame_drop_frames", tti.frame_drop.frames), + FBNIC_HW_STAT("tti_frame_drop_bytes", tti.frame_drop.bytes), + FBNIC_HW_STAT("tti_tbi_drop_frames", tti.tbi_drop.frames), + FBNIC_HW_STAT("tti_tbi_drop_bytes", tti.tbi_drop.bytes), + + /* TMI */ + FBNIC_HW_STAT("ptp_illegal_req", tmi.ptp_illegal_req), + FBNIC_HW_STAT("ptp_good_ts", tmi.ptp_good_ts), + FBNIC_HW_STAT("ptp_bad_ts", tmi.ptp_bad_ts), + /* RPC */ FBNIC_HW_STAT("rpc_unkn_etype", rpc.unkn_etype), FBNIC_HW_STAT("rpc_unkn_ext_hdr", rpc.unkn_ext_hdr), @@ -39,7 +52,64 @@ static const struct fbnic_stat fbnic_gstrings_hw_stats[] = { }; #define FBNIC_HW_FIXED_STATS_LEN ARRAY_SIZE(fbnic_gstrings_hw_stats) -#define FBNIC_HW_STATS_LEN FBNIC_HW_FIXED_STATS_LEN + +#define FBNIC_RXB_ENQUEUE_STAT(name, stat) \ + FBNIC_STAT_FIELDS(fbnic_rxb_enqueue_stats, name, stat) + +static const struct fbnic_stat fbnic_gstrings_rxb_enqueue_stats[] = { + FBNIC_RXB_ENQUEUE_STAT("rxb_integrity_err%u", integrity_err), + FBNIC_RXB_ENQUEUE_STAT("rxb_mac_err%u", mac_err), + FBNIC_RXB_ENQUEUE_STAT("rxb_parser_err%u", parser_err), + FBNIC_RXB_ENQUEUE_STAT("rxb_frm_err%u", frm_err), + + FBNIC_RXB_ENQUEUE_STAT("rxb_drbo%u_frames", drbo.frames), + FBNIC_RXB_ENQUEUE_STAT("rxb_drbo%u_bytes", drbo.bytes), +}; + +#define FBNIC_HW_RXB_ENQUEUE_STATS_LEN \ + ARRAY_SIZE(fbnic_gstrings_rxb_enqueue_stats) + +#define FBNIC_RXB_FIFO_STAT(name, stat) \ + FBNIC_STAT_FIELDS(fbnic_rxb_fifo_stats, name, stat) + +static const struct fbnic_stat fbnic_gstrings_rxb_fifo_stats[] = { + FBNIC_RXB_FIFO_STAT("rxb_fifo%u_drop", trans_drop), + FBNIC_RXB_FIFO_STAT("rxb_fifo%u_dropped_frames", drop.frames), + FBNIC_RXB_FIFO_STAT("rxb_fifo%u_ecn", trans_ecn), + FBNIC_RXB_FIFO_STAT("rxb_fifo%u_level", level), +}; + +#define FBNIC_HW_RXB_FIFO_STATS_LEN ARRAY_SIZE(fbnic_gstrings_rxb_fifo_stats) + +#define FBNIC_RXB_DEQUEUE_STAT(name, stat) \ + FBNIC_STAT_FIELDS(fbnic_rxb_dequeue_stats, name, stat) + +static const struct fbnic_stat fbnic_gstrings_rxb_dequeue_stats[] = { + FBNIC_RXB_DEQUEUE_STAT("rxb_intf%u_frames", intf.frames), + FBNIC_RXB_DEQUEUE_STAT("rxb_intf%u_bytes", intf.bytes), + FBNIC_RXB_DEQUEUE_STAT("rxb_pbuf%u_frames", pbuf.frames), + FBNIC_RXB_DEQUEUE_STAT("rxb_pbuf%u_bytes", pbuf.bytes), +}; + +#define FBNIC_HW_RXB_DEQUEUE_STATS_LEN \ + ARRAY_SIZE(fbnic_gstrings_rxb_dequeue_stats) + +#define FBNIC_HW_Q_STAT(name, stat) \ + FBNIC_STAT_FIELDS(fbnic_hw_q_stats, name, stat.value) + +static const struct fbnic_stat fbnic_gstrings_hw_q_stats[] = { + FBNIC_HW_Q_STAT("rde_%u_pkt_err", rde_pkt_err), + FBNIC_HW_Q_STAT("rde_%u_pkt_cq_drop", rde_pkt_cq_drop), + FBNIC_HW_Q_STAT("rde_%u_pkt_bdq_drop", rde_pkt_bdq_drop), +}; + +#define FBNIC_HW_Q_STATS_LEN ARRAY_SIZE(fbnic_gstrings_hw_q_stats) +#define FBNIC_HW_STATS_LEN \ + (FBNIC_HW_FIXED_STATS_LEN + \ + FBNIC_HW_RXB_ENQUEUE_STATS_LEN * FBNIC_RXB_ENQUEUE_INDICES + \ + FBNIC_HW_RXB_FIFO_STATS_LEN * FBNIC_RXB_FIFO_INDICES + \ + FBNIC_HW_RXB_DEQUEUE_STATS_LEN * FBNIC_RXB_DEQUEUE_INDICES + \ + FBNIC_HW_Q_STATS_LEN * FBNIC_MAX_QUEUES) static void fbnic_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) @@ -298,31 
+368,125 @@ err_free_clone: return err; } -static void fbnic_get_strings(struct net_device *dev, u32 sset, u8 *data) +static void fbnic_get_rxb_enqueue_strings(u8 **data, unsigned int idx) +{ + const struct fbnic_stat *stat; + int i; + + stat = fbnic_gstrings_rxb_enqueue_stats; + for (i = 0; i < FBNIC_HW_RXB_ENQUEUE_STATS_LEN; i++, stat++) + ethtool_sprintf(data, stat->string, idx); +} + +static void fbnic_get_rxb_fifo_strings(u8 **data, unsigned int idx) +{ + const struct fbnic_stat *stat; + int i; + + stat = fbnic_gstrings_rxb_fifo_stats; + for (i = 0; i < FBNIC_HW_RXB_FIFO_STATS_LEN; i++, stat++) + ethtool_sprintf(data, stat->string, idx); +} + +static void fbnic_get_rxb_dequeue_strings(u8 **data, unsigned int idx) { + const struct fbnic_stat *stat; int i; + stat = fbnic_gstrings_rxb_dequeue_stats; + for (i = 0; i < FBNIC_HW_RXB_DEQUEUE_STATS_LEN; i++, stat++) + ethtool_sprintf(data, stat->string, idx); +} + +static void fbnic_get_strings(struct net_device *dev, u32 sset, u8 *data) +{ + const struct fbnic_stat *stat; + int i, idx; + switch (sset) { case ETH_SS_STATS: - for (i = 0; i < FBNIC_HW_STATS_LEN; i++) + for (i = 0; i < FBNIC_HW_FIXED_STATS_LEN; i++) ethtool_puts(&data, fbnic_gstrings_hw_stats[i].string); + + for (i = 0; i < FBNIC_RXB_ENQUEUE_INDICES; i++) + fbnic_get_rxb_enqueue_strings(&data, i); + + for (i = 0; i < FBNIC_RXB_FIFO_INDICES; i++) + fbnic_get_rxb_fifo_strings(&data, i); + + for (i = 0; i < FBNIC_RXB_DEQUEUE_INDICES; i++) + fbnic_get_rxb_dequeue_strings(&data, i); + + for (idx = 0; idx < FBNIC_MAX_QUEUES; idx++) { + stat = fbnic_gstrings_hw_q_stats; + + for (i = 0; i < FBNIC_HW_Q_STATS_LEN; i++, stat++) + ethtool_sprintf(&data, stat->string, idx); + } break; } } +static void fbnic_report_hw_stats(const struct fbnic_stat *stat, + const void *base, int len, u64 **data) +{ + while (len--) { + u8 *curr = (u8 *)base + stat->offset; + + **data = *(u64 *)curr; + + stat++; + (*data)++; + } +} + static void fbnic_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { struct fbnic_net *fbn = netdev_priv(dev); - const struct fbnic_stat *stat; + struct fbnic_dev *fbd = fbn->fbd; int i; fbnic_get_hw_stats(fbn->fbd); - for (i = 0; i < FBNIC_HW_STATS_LEN; i++) { - stat = &fbnic_gstrings_hw_stats[i]; - data[i] = *(u64 *)((u8 *)&fbn->fbd->hw_stats + stat->offset); + spin_lock(&fbd->hw_stats_lock); + fbnic_report_hw_stats(fbnic_gstrings_hw_stats, &fbd->hw_stats, + FBNIC_HW_FIXED_STATS_LEN, &data); + + for (i = 0; i < FBNIC_RXB_ENQUEUE_INDICES; i++) { + const struct fbnic_rxb_enqueue_stats *enq; + + enq = &fbd->hw_stats.rxb.enq[i]; + fbnic_report_hw_stats(fbnic_gstrings_rxb_enqueue_stats, + enq, FBNIC_HW_RXB_ENQUEUE_STATS_LEN, + &data); + } + + for (i = 0; i < FBNIC_RXB_FIFO_INDICES; i++) { + const struct fbnic_rxb_fifo_stats *fifo; + + fifo = &fbd->hw_stats.rxb.fifo[i]; + fbnic_report_hw_stats(fbnic_gstrings_rxb_fifo_stats, + fifo, FBNIC_HW_RXB_FIFO_STATS_LEN, + &data); + } + + for (i = 0; i < FBNIC_RXB_DEQUEUE_INDICES; i++) { + const struct fbnic_rxb_dequeue_stats *deq; + + deq = &fbd->hw_stats.rxb.deq[i]; + fbnic_report_hw_stats(fbnic_gstrings_rxb_dequeue_stats, + deq, FBNIC_HW_RXB_DEQUEUE_STATS_LEN, + &data); + } + + for (i = 0; i < FBNIC_MAX_QUEUES; i++) { + const struct fbnic_hw_q_stats *hw_q = &fbd->hw_stats.hw_q[i]; + + fbnic_report_hw_stats(fbnic_gstrings_hw_q_stats, hw_q, + FBNIC_HW_Q_STATS_LEN, &data); } + spin_unlock(&fbd->hw_stats_lock); } static int fbnic_get_sset_count(struct net_device *dev, int sset) diff --git 
a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c index 3d9636a6c968..e2368075ab8c 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c @@ -237,6 +237,44 @@ static int fbnic_mbx_map_tlv_msg(struct fbnic_dev *fbd, return err; } +static int fbnic_mbx_set_cmpl_slot(struct fbnic_dev *fbd, + struct fbnic_fw_completion *cmpl_data) +{ + struct fbnic_fw_mbx *tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX]; + int free = -EXFULL; + int i; + + if (!tx_mbx->ready) + return -ENODEV; + + for (i = 0; i < FBNIC_MBX_CMPL_SLOTS; i++) { + if (!fbd->cmpl_data[i]) + free = i; + else if (fbd->cmpl_data[i]->msg_type == cmpl_data->msg_type) + return -EEXIST; + } + + if (free == -EXFULL) + return -EXFULL; + + fbd->cmpl_data[free] = cmpl_data; + + return 0; +} + +static void fbnic_mbx_clear_cmpl_slot(struct fbnic_dev *fbd, + struct fbnic_fw_completion *cmpl_data) +{ + int i; + + for (i = 0; i < FBNIC_MBX_CMPL_SLOTS; i++) { + if (fbd->cmpl_data[i] == cmpl_data) { + fbd->cmpl_data[i] = NULL; + break; + } + } +} + static void fbnic_mbx_process_tx_msgs(struct fbnic_dev *fbd) { struct fbnic_fw_mbx *tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX]; @@ -258,6 +296,19 @@ static void fbnic_mbx_process_tx_msgs(struct fbnic_dev *fbd) tx_mbx->head = head; } +int fbnic_mbx_set_cmpl(struct fbnic_dev *fbd, + struct fbnic_fw_completion *cmpl_data) +{ + unsigned long flags; + int err; + + spin_lock_irqsave(&fbd->fw_tx_lock, flags); + err = fbnic_mbx_set_cmpl_slot(fbd, cmpl_data); + spin_unlock_irqrestore(&fbd->fw_tx_lock, flags); + + return err; +} + static int fbnic_mbx_map_req_w_cmpl(struct fbnic_dev *fbd, struct fbnic_tlv_msg *msg, struct fbnic_fw_completion *cmpl_data) @@ -266,23 +317,20 @@ static int fbnic_mbx_map_req_w_cmpl(struct fbnic_dev *fbd, int err; spin_lock_irqsave(&fbd->fw_tx_lock, flags); - - /* If we are already waiting on a completion then abort */ - if (cmpl_data && fbd->cmpl_data) { - err = -EBUSY; - goto unlock_mbx; + if (cmpl_data) { + err = fbnic_mbx_set_cmpl_slot(fbd, cmpl_data); + if (err) + goto unlock_mbx; } - /* Record completion location and submit request */ - if (cmpl_data) - fbd->cmpl_data = cmpl_data; - err = fbnic_mbx_map_msg(fbd, FBNIC_IPC_MBX_TX_IDX, msg, le16_to_cpu(msg->hdr.len) * sizeof(u32), 1); - /* If msg failed then clear completion data for next caller */ + /* If we successfully reserved a completion and msg failed + * then clear completion data for next caller + */ if (err && cmpl_data) - fbd->cmpl_data = NULL; + fbnic_mbx_clear_cmpl_slot(fbd, cmpl_data); unlock_mbx: spin_unlock_irqrestore(&fbd->fw_tx_lock, flags); @@ -304,12 +352,18 @@ fbnic_fw_get_cmpl_by_type(struct fbnic_dev *fbd, u32 msg_type) { struct fbnic_fw_completion *cmpl_data = NULL; unsigned long flags; + int i; spin_lock_irqsave(&fbd->fw_tx_lock, flags); - if (fbd->cmpl_data && fbd->cmpl_data->msg_type == msg_type) { - cmpl_data = fbd->cmpl_data; - kref_get(&fbd->cmpl_data->ref_count); + for (i = 0; i < FBNIC_MBX_CMPL_SLOTS; i++) { + if (fbd->cmpl_data[i] && + fbd->cmpl_data[i]->msg_type == msg_type) { + cmpl_data = fbd->cmpl_data[i]; + kref_get(&cmpl_data->ref_count); + break; + } } + spin_unlock_irqrestore(&fbd->fw_tx_lock, flags); return cmpl_data; @@ -464,6 +518,7 @@ static const struct fbnic_tlv_index fbnic_fw_cap_resp_index[] = { FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_UEFI_VERSION), FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR, FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE), + FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_ANTI_ROLLBACK_VERSION), 
FBNIC_TLV_ATTR_LAST }; @@ -586,6 +641,9 @@ static int fbnic_fw_parse_cap_resp(void *opaque, struct fbnic_tlv_msg **results) if (results[FBNIC_FW_CAP_RESP_BMC_ALL_MULTI] || !bmc_present) fbd->fw_cap.all_multi = all_multi; + fbd->fw_cap.anti_rollback_version = + fta_get_uint(results, FBNIC_FW_CAP_RESP_ANTI_ROLLBACK_VERSION); + return 0; } @@ -708,6 +766,188 @@ void fbnic_fw_check_heartbeat(struct fbnic_dev *fbd) dev_warn(fbd->dev, "Failed to send heartbeat message\n"); } +int fbnic_fw_xmit_fw_start_upgrade(struct fbnic_dev *fbd, + struct fbnic_fw_completion *cmpl_data, + unsigned int id, unsigned int len) +{ + struct fbnic_tlv_msg *msg; + int err; + + if (!fbnic_fw_present(fbd)) + return -ENODEV; + + if (!len) + return -EINVAL; + + msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_FW_START_UPGRADE_REQ); + if (!msg) + return -ENOMEM; + + err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_START_UPGRADE_SECTION, id); + if (err) + goto free_message; + + err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_START_UPGRADE_IMAGE_LENGTH, + len); + if (err) + goto free_message; + + err = fbnic_mbx_map_req_w_cmpl(fbd, msg, cmpl_data); + if (err) + goto free_message; + + return 0; + +free_message: + free_page((unsigned long)msg); + return err; +} + +static const struct fbnic_tlv_index fbnic_fw_start_upgrade_resp_index[] = { + FBNIC_TLV_ATTR_S32(FBNIC_FW_START_UPGRADE_ERROR), + FBNIC_TLV_ATTR_LAST +}; + +static int fbnic_fw_parse_fw_start_upgrade_resp(void *opaque, + struct fbnic_tlv_msg **results) +{ + struct fbnic_fw_completion *cmpl_data; + struct fbnic_dev *fbd = opaque; + u32 msg_type; + s32 err; + + /* Verify we have a completion pointer */ + msg_type = FBNIC_TLV_MSG_ID_FW_START_UPGRADE_REQ; + cmpl_data = fbnic_fw_get_cmpl_by_type(fbd, msg_type); + if (!cmpl_data) + return -ENOSPC; + + /* Check for errors */ + err = fta_get_sint(results, FBNIC_FW_START_UPGRADE_ERROR); + + cmpl_data->result = err; + complete(&cmpl_data->done); + fbnic_fw_put_cmpl(cmpl_data); + + return 0; +} + +int fbnic_fw_xmit_fw_write_chunk(struct fbnic_dev *fbd, + const u8 *data, u32 offset, u16 length, + int cancel_error) +{ + struct fbnic_tlv_msg *msg; + int err; + + msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_RESP); + if (!msg) + return -ENOMEM; + + /* Report error to FW to cancel upgrade */ + if (cancel_error) { + err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_WRITE_CHUNK_ERROR, + cancel_error); + if (err) + goto free_message; + } + + if (data) { + err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_WRITE_CHUNK_OFFSET, + offset); + if (err) + goto free_message; + + err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_WRITE_CHUNK_LENGTH, + length); + if (err) + goto free_message; + + err = fbnic_tlv_attr_put_value(msg, FBNIC_FW_WRITE_CHUNK_DATA, + data + offset, length); + if (err) + goto free_message; + } + + err = fbnic_mbx_map_tlv_msg(fbd, msg); + if (err) + goto free_message; + + return 0; + +free_message: + free_page((unsigned long)msg); + return err; +} + +static const struct fbnic_tlv_index fbnic_fw_write_chunk_req_index[] = { + FBNIC_TLV_ATTR_U32(FBNIC_FW_WRITE_CHUNK_OFFSET), + FBNIC_TLV_ATTR_U32(FBNIC_FW_WRITE_CHUNK_LENGTH), + FBNIC_TLV_ATTR_LAST +}; + +static int fbnic_fw_parse_fw_write_chunk_req(void *opaque, + struct fbnic_tlv_msg **results) +{ + struct fbnic_fw_completion *cmpl_data; + struct fbnic_dev *fbd = opaque; + u32 msg_type; + u32 offset; + u32 length; + + /* Verify we have a completion pointer */ + msg_type = FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_REQ; + cmpl_data = fbnic_fw_get_cmpl_by_type(fbd, msg_type); + if (!cmpl_data) + return -ENOSPC; + 
+ /* Pull length/offset pair and mark it as complete */ + offset = fta_get_uint(results, FBNIC_FW_WRITE_CHUNK_OFFSET); + length = fta_get_uint(results, FBNIC_FW_WRITE_CHUNK_LENGTH); + cmpl_data->u.fw_update.offset = offset; + cmpl_data->u.fw_update.length = length; + + complete(&cmpl_data->done); + fbnic_fw_put_cmpl(cmpl_data); + + return 0; +} + +static const struct fbnic_tlv_index fbnic_fw_finish_upgrade_req_index[] = { + FBNIC_TLV_ATTR_S32(FBNIC_FW_FINISH_UPGRADE_ERROR), + FBNIC_TLV_ATTR_LAST +}; + +static int fbnic_fw_parse_fw_finish_upgrade_req(void *opaque, + struct fbnic_tlv_msg **results) +{ + struct fbnic_fw_completion *cmpl_data; + struct fbnic_dev *fbd = opaque; + u32 msg_type; + s32 err; + + /* Verify we have a completion pointer */ + msg_type = FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_REQ; + cmpl_data = fbnic_fw_get_cmpl_by_type(fbd, msg_type); + if (!cmpl_data) + return -ENOSPC; + + /* Check for errors */ + err = fta_get_sint(results, FBNIC_FW_FINISH_UPGRADE_ERROR); + + /* Close out update by incrementing offset by length which should + * match the total size of the component. Set length to 0 since no + * new chunks will be requested. + */ + cmpl_data->u.fw_update.offset += cmpl_data->u.fw_update.length; + cmpl_data->u.fw_update.length = 0; + + cmpl_data->result = err; + complete(&cmpl_data->done); + fbnic_fw_put_cmpl(cmpl_data); + + return 0; +} + /** * fbnic_fw_xmit_tsene_read_msg - Create and transmit a sensor read request * @fbd: FBNIC device structure @@ -792,6 +1032,15 @@ static const struct fbnic_tlv_parser fbnic_fw_tlv_parser[] = { fbnic_fw_parse_ownership_resp), FBNIC_TLV_PARSER(HEARTBEAT_RESP, fbnic_heartbeat_resp_index, fbnic_fw_parse_heartbeat_resp), + FBNIC_TLV_PARSER(FW_START_UPGRADE_RESP, + fbnic_fw_start_upgrade_resp_index, + fbnic_fw_parse_fw_start_upgrade_resp), + FBNIC_TLV_PARSER(FW_WRITE_CHUNK_REQ, + fbnic_fw_write_chunk_req_index, + fbnic_fw_parse_fw_write_chunk_req), + FBNIC_TLV_PARSER(FW_FINISH_UPGRADE_REQ, + fbnic_fw_finish_upgrade_req_index, + fbnic_fw_parse_fw_finish_upgrade_req), FBNIC_TLV_PARSER(TSENE_READ_RESP, fbnic_tsene_read_resp_index, fbnic_fw_parse_tsene_read_resp), @@ -921,10 +1170,16 @@ static void __fbnic_fw_evict_cmpl(struct fbnic_fw_completion *cmpl_data) static void fbnic_mbx_evict_all_cmpl(struct fbnic_dev *fbd) { - if (fbd->cmpl_data) { - __fbnic_fw_evict_cmpl(fbd->cmpl_data); - fbd->cmpl_data = NULL; + int i; + + for (i = 0; i < FBNIC_MBX_CMPL_SLOTS; i++) { + struct fbnic_fw_completion *cmpl_data = fbd->cmpl_data[i]; + + if (cmpl_data) + __fbnic_fw_evict_cmpl(cmpl_data); } + + memset(fbd->cmpl_data, 0, sizeof(fbd->cmpl_data)); } void fbnic_mbx_flush_tx(struct fbnic_dev *fbd) @@ -977,20 +1232,28 @@ void fbnic_get_fw_ver_commit_str(struct fbnic_dev *fbd, char *fw_version, fw_version, str_sz); } -void fbnic_fw_init_cmpl(struct fbnic_fw_completion *fw_cmpl, - u32 msg_type) +struct fbnic_fw_completion *fbnic_fw_alloc_cmpl(u32 msg_type) { - fw_cmpl->msg_type = msg_type; - init_completion(&fw_cmpl->done); - kref_init(&fw_cmpl->ref_count); + struct fbnic_fw_completion *cmpl; + + cmpl = kzalloc(sizeof(*cmpl), GFP_KERNEL); + if (!cmpl) + return NULL; + + cmpl->msg_type = msg_type; + init_completion(&cmpl->done); + kref_init(&cmpl->ref_count); + + return cmpl; } -void fbnic_fw_clear_compl(struct fbnic_dev *fbd) +void fbnic_fw_clear_cmpl(struct fbnic_dev *fbd, + struct fbnic_fw_completion *fw_cmpl) { unsigned long flags; spin_lock_irqsave(&fbd->fw_tx_lock, flags); - fbd->cmpl_data = NULL; + fbnic_mbx_clear_cmpl_slot(fbd, fw_cmpl); 
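Editorial aside, not part of the patch: a minimal sketch of how a caller is expected to drive the new multi-slot firmware completion API introduced above (fbnic_fw_alloc_cmpl / fbnic_fw_clear_cmpl / fbnic_fw_put_cmpl, with the slot reserved by fbnic_mbx_map_req_w_cmpl). The function name, the 10-second timeout and the error handling are illustrative assumptions, not code from this series.

/* Hypothetical caller: start a firmware section upgrade and wait for the
 * FW_START_UPGRADE_RESP parser to signal the completion.
 */
static int example_start_upgrade(struct fbnic_dev *fbd, unsigned int section,
				 unsigned int size)
{
	struct fbnic_fw_completion *cmpl;
	int err;

	/* Allocate a refcounted completion keyed by the request message type,
	 * which is what fbnic_fw_parse_fw_start_upgrade_resp() looks up.
	 */
	cmpl = fbnic_fw_alloc_cmpl(FBNIC_TLV_MSG_ID_FW_START_UPGRADE_REQ);
	if (!cmpl)
		return -ENOMEM;

	/* Build and map the request; this also reserves a completion slot */
	err = fbnic_fw_xmit_fw_start_upgrade(fbd, cmpl, section, size);
	if (err)
		goto free_cmpl;

	/* Wait for firmware to answer; timeout value is illustrative */
	if (!wait_for_completion_timeout(&cmpl->done, 10 * HZ)) {
		err = -ETIMEDOUT;
		goto clear_cmpl;
	}

	err = cmpl->result;

clear_cmpl:
	/* Release the slot so another request of this type can be queued */
	fbnic_fw_clear_cmpl(fbd, cmpl);
free_cmpl:
	fbnic_fw_put_cmpl(cmpl);
	return err;
}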
spin_unlock_irqrestore(&fbd->fw_tx_lock, flags); } diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h index a3618e7826c2..08bc4b918de7 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h +++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h @@ -42,6 +42,7 @@ struct fbnic_fw_cap { u8 all_multi : 1; u8 link_speed; u8 link_fec; + u32 anti_rollback_version; }; struct fbnic_fw_completion { @@ -51,6 +52,10 @@ struct fbnic_fw_completion { int result; union { struct { + u32 offset; + u32 length; + } fw_update; + struct { s32 millivolts; s32 millidegrees; } tsene; @@ -59,17 +64,25 @@ struct fbnic_fw_completion { void fbnic_mbx_init(struct fbnic_dev *fbd); void fbnic_mbx_clean(struct fbnic_dev *fbd); +int fbnic_mbx_set_cmpl(struct fbnic_dev *fbd, + struct fbnic_fw_completion *cmpl_data); void fbnic_mbx_poll(struct fbnic_dev *fbd); int fbnic_mbx_poll_tx_ready(struct fbnic_dev *fbd); void fbnic_mbx_flush_tx(struct fbnic_dev *fbd); int fbnic_fw_xmit_ownership_msg(struct fbnic_dev *fbd, bool take_ownership); int fbnic_fw_init_heartbeat(struct fbnic_dev *fbd, bool poll); void fbnic_fw_check_heartbeat(struct fbnic_dev *fbd); +int fbnic_fw_xmit_fw_start_upgrade(struct fbnic_dev *fbd, + struct fbnic_fw_completion *cmpl_data, + unsigned int id, unsigned int len); +int fbnic_fw_xmit_fw_write_chunk(struct fbnic_dev *fbd, + const u8 *data, u32 offset, u16 length, + int cancel_error); int fbnic_fw_xmit_tsene_read_msg(struct fbnic_dev *fbd, struct fbnic_fw_completion *cmpl_data); -void fbnic_fw_init_cmpl(struct fbnic_fw_completion *cmpl_data, - u32 msg_type); -void fbnic_fw_clear_compl(struct fbnic_dev *fbd); +struct fbnic_fw_completion *fbnic_fw_alloc_cmpl(u32 msg_type); +void fbnic_fw_clear_cmpl(struct fbnic_dev *fbd, + struct fbnic_fw_completion *cmpl_data); void fbnic_fw_put_cmpl(struct fbnic_fw_completion *cmpl_data); #define fbnic_mk_full_fw_ver_str(_rev_id, _delim, _commit, _str, _str_sz) \ @@ -86,6 +99,15 @@ do { \ #define fbnic_mk_fw_ver_str(_rev_id, _str) \ fbnic_mk_full_fw_ver_str(_rev_id, "", "", _str, sizeof(_str)) +enum { + QSPI_SECTION_CMRT = 0, + QSPI_SECTION_CONTROL_FW = 1, + QSPI_SECTION_UCODE = 2, + QSPI_SECTION_OPTION_ROM = 3, + QSPI_SECTION_USER = 4, + QSPI_SECTION_INVALID, +}; + #define FW_HEARTBEAT_PERIOD (10 * HZ) enum { @@ -95,6 +117,12 @@ enum { FBNIC_TLV_MSG_ID_OWNERSHIP_RESP = 0x13, FBNIC_TLV_MSG_ID_HEARTBEAT_REQ = 0x14, FBNIC_TLV_MSG_ID_HEARTBEAT_RESP = 0x15, + FBNIC_TLV_MSG_ID_FW_START_UPGRADE_REQ = 0x22, + FBNIC_TLV_MSG_ID_FW_START_UPGRADE_RESP = 0x23, + FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_REQ = 0x24, + FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_RESP = 0x25, + FBNIC_TLV_MSG_ID_FW_FINISH_UPGRADE_REQ = 0x28, + FBNIC_TLV_MSG_ID_FW_FINISH_UPGRADE_RESP = 0x29, FBNIC_TLV_MSG_ID_TSENE_READ_REQ = 0x3C, FBNIC_TLV_MSG_ID_TSENE_READ_RESP = 0x3D, }; @@ -122,6 +150,7 @@ enum { FBNIC_FW_CAP_RESP_STORED_CMRT_COMMIT_STR = 0x10, FBNIC_FW_CAP_RESP_UEFI_VERSION = 0x11, FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR = 0x12, + FBNIC_FW_CAP_RESP_ANTI_ROLLBACK_VERSION = 0x15, FBNIC_FW_CAP_RESP_MSG_MAX }; @@ -149,4 +178,25 @@ enum { FBNIC_FW_OWNERSHIP_FLAG = 0x0, FBNIC_FW_OWNERSHIP_MSG_MAX }; + +enum { + FBNIC_FW_START_UPGRADE_ERROR = 0x0, + FBNIC_FW_START_UPGRADE_SECTION = 0x1, + FBNIC_FW_START_UPGRADE_IMAGE_LENGTH = 0x2, + FBNIC_FW_START_UPGRADE_MSG_MAX +}; + +enum { + FBNIC_FW_WRITE_CHUNK_OFFSET = 0x0, + FBNIC_FW_WRITE_CHUNK_LENGTH = 0x1, + FBNIC_FW_WRITE_CHUNK_DATA = 0x2, + FBNIC_FW_WRITE_CHUNK_ERROR = 0x3, + FBNIC_FW_WRITE_CHUNK_MSG_MAX +}; + +enum { + 
FBNIC_FW_FINISH_UPGRADE_ERROR = 0x0, + FBNIC_FW_FINISH_UPGRADE_MSG_MAX +}; + #endif /* _FBNIC_FW_H_ */ diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c b/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c index 89ac6bc8c7fc..4223d8100e64 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c @@ -70,6 +70,100 @@ static void fbnic_hw_stat_rd64(struct fbnic_dev *fbd, u32 reg, s32 offset, stat->u.old_reg_value_64 = new_reg_value; } +static void fbnic_reset_tmi_stats(struct fbnic_dev *fbd, + struct fbnic_tmi_stats *tmi) +{ + fbnic_hw_stat_rst32(fbd, FBNIC_TMI_DROP_PKTS, &tmi->drop.frames); + fbnic_hw_stat_rst64(fbd, FBNIC_TMI_DROP_BYTE_L, 1, &tmi->drop.bytes); + + fbnic_hw_stat_rst32(fbd, + FBNIC_TMI_ILLEGAL_PTP_REQS, + &tmi->ptp_illegal_req); + fbnic_hw_stat_rst32(fbd, FBNIC_TMI_GOOD_PTP_TS, &tmi->ptp_good_ts); + fbnic_hw_stat_rst32(fbd, FBNIC_TMI_BAD_PTP_TS, &tmi->ptp_bad_ts); +} + +static void fbnic_get_tmi_stats32(struct fbnic_dev *fbd, + struct fbnic_tmi_stats *tmi) +{ + fbnic_hw_stat_rd32(fbd, FBNIC_TMI_DROP_PKTS, &tmi->drop.frames); + + fbnic_hw_stat_rd32(fbd, + FBNIC_TMI_ILLEGAL_PTP_REQS, + &tmi->ptp_illegal_req); + fbnic_hw_stat_rd32(fbd, FBNIC_TMI_GOOD_PTP_TS, &tmi->ptp_good_ts); + fbnic_hw_stat_rd32(fbd, FBNIC_TMI_BAD_PTP_TS, &tmi->ptp_bad_ts); +} + +static void fbnic_get_tmi_stats(struct fbnic_dev *fbd, + struct fbnic_tmi_stats *tmi) +{ + fbnic_hw_stat_rd64(fbd, FBNIC_TMI_DROP_BYTE_L, 1, &tmi->drop.bytes); +} + +static void fbnic_reset_tti_stats(struct fbnic_dev *fbd, + struct fbnic_tti_stats *tti) +{ + fbnic_hw_stat_rst32(fbd, + FBNIC_TCE_TTI_CM_DROP_PKTS, + &tti->cm_drop.frames); + fbnic_hw_stat_rst64(fbd, + FBNIC_TCE_TTI_CM_DROP_BYTE_L, + 1, + &tti->cm_drop.bytes); + + fbnic_hw_stat_rst32(fbd, + FBNIC_TCE_TTI_FRAME_DROP_PKTS, + &tti->frame_drop.frames); + fbnic_hw_stat_rst64(fbd, + FBNIC_TCE_TTI_FRAME_DROP_BYTE_L, + 1, + &tti->frame_drop.bytes); + + fbnic_hw_stat_rst32(fbd, + FBNIC_TCE_TBI_DROP_PKTS, + &tti->tbi_drop.frames); + fbnic_hw_stat_rst64(fbd, + FBNIC_TCE_TBI_DROP_BYTE_L, + 1, + &tti->tbi_drop.bytes); +} + +static void fbnic_get_tti_stats32(struct fbnic_dev *fbd, + struct fbnic_tti_stats *tti) +{ + fbnic_hw_stat_rd32(fbd, + FBNIC_TCE_TTI_CM_DROP_PKTS, + &tti->cm_drop.frames); + + fbnic_hw_stat_rd32(fbd, + FBNIC_TCE_TTI_FRAME_DROP_PKTS, + &tti->frame_drop.frames); + + fbnic_hw_stat_rd32(fbd, + FBNIC_TCE_TBI_DROP_PKTS, + &tti->tbi_drop.frames); +} + +static void fbnic_get_tti_stats(struct fbnic_dev *fbd, + struct fbnic_tti_stats *tti) +{ + fbnic_hw_stat_rd64(fbd, + FBNIC_TCE_TTI_CM_DROP_BYTE_L, + 1, + &tti->cm_drop.bytes); + + fbnic_hw_stat_rd64(fbd, + FBNIC_TCE_TTI_FRAME_DROP_BYTE_L, + 1, + &tti->frame_drop.bytes); + + fbnic_hw_stat_rd64(fbd, + FBNIC_TCE_TBI_DROP_BYTE_L, + 1, + &tti->tbi_drop.bytes); +} + static void fbnic_reset_rpc_stats(struct fbnic_dev *fbd, struct fbnic_rpc_stats *rpc) { @@ -117,6 +211,221 @@ static void fbnic_get_rpc_stats32(struct fbnic_dev *fbd, &rpc->ovr_size_err); } +static void fbnic_reset_rxb_fifo_stats(struct fbnic_dev *fbd, int i, + struct fbnic_rxb_fifo_stats *fifo) +{ + fbnic_hw_stat_rst32(fbd, FBNIC_RXB_DROP_FRMS_STS(i), + &fifo->drop.frames); + fbnic_hw_stat_rst64(fbd, FBNIC_RXB_DROP_BYTES_STS_L(i), 1, + &fifo->drop.bytes); + + fbnic_hw_stat_rst32(fbd, FBNIC_RXB_TRUN_FRMS_STS(i), + &fifo->trunc.frames); + fbnic_hw_stat_rst64(fbd, FBNIC_RXB_TRUN_BYTES_STS_L(i), 1, + &fifo->trunc.bytes); + + fbnic_hw_stat_rst32(fbd, FBNIC_RXB_TRANS_DROP_STS(i), + &fifo->trans_drop); + 
fbnic_hw_stat_rst32(fbd, FBNIC_RXB_TRANS_ECN_STS(i), + &fifo->trans_ecn); + + fifo->level.u.old_reg_value_32 = 0; +} + +static void fbnic_reset_rxb_enq_stats(struct fbnic_dev *fbd, int i, + struct fbnic_rxb_enqueue_stats *enq) +{ + fbnic_hw_stat_rst32(fbd, FBNIC_RXB_DRBO_FRM_CNT_SRC(i), + &enq->drbo.frames); + fbnic_hw_stat_rst64(fbd, FBNIC_RXB_DRBO_BYTE_CNT_SRC_L(i), 4, + &enq->drbo.bytes); + + fbnic_hw_stat_rst32(fbd, FBNIC_RXB_INTEGRITY_ERR(i), + &enq->integrity_err); + fbnic_hw_stat_rst32(fbd, FBNIC_RXB_MAC_ERR(i), + &enq->mac_err); + fbnic_hw_stat_rst32(fbd, FBNIC_RXB_PARSER_ERR(i), + &enq->parser_err); + fbnic_hw_stat_rst32(fbd, FBNIC_RXB_FRM_ERR(i), + &enq->frm_err); +} + +static void fbnic_reset_rxb_deq_stats(struct fbnic_dev *fbd, int i, + struct fbnic_rxb_dequeue_stats *deq) +{ + fbnic_hw_stat_rst32(fbd, FBNIC_RXB_INTF_FRM_CNT_DST(i), + &deq->intf.frames); + fbnic_hw_stat_rst64(fbd, FBNIC_RXB_INTF_BYTE_CNT_DST_L(i), 4, + &deq->intf.bytes); + + fbnic_hw_stat_rst32(fbd, FBNIC_RXB_PBUF_FRM_CNT_DST(i), + &deq->pbuf.frames); + fbnic_hw_stat_rst64(fbd, FBNIC_RXB_PBUF_BYTE_CNT_DST_L(i), 4, + &deq->pbuf.bytes); +} + +static void fbnic_reset_rxb_stats(struct fbnic_dev *fbd, + struct fbnic_rxb_stats *rxb) +{ + int i; + + for (i = 0; i < FBNIC_RXB_FIFO_INDICES; i++) + fbnic_reset_rxb_fifo_stats(fbd, i, &rxb->fifo[i]); + + for (i = 0; i < FBNIC_RXB_INTF_INDICES; i++) { + fbnic_reset_rxb_enq_stats(fbd, i, &rxb->enq[i]); + fbnic_reset_rxb_deq_stats(fbd, i, &rxb->deq[i]); + } +} + +static void fbnic_get_rxb_fifo_stats32(struct fbnic_dev *fbd, int i, + struct fbnic_rxb_fifo_stats *fifo) +{ + fbnic_hw_stat_rd32(fbd, FBNIC_RXB_DROP_FRMS_STS(i), + &fifo->drop.frames); + fbnic_hw_stat_rd32(fbd, FBNIC_RXB_TRUN_FRMS_STS(i), + &fifo->trunc.frames); + + fbnic_hw_stat_rd32(fbd, FBNIC_RXB_TRANS_DROP_STS(i), + &fifo->trans_drop); + fbnic_hw_stat_rd32(fbd, FBNIC_RXB_TRANS_ECN_STS(i), + &fifo->trans_ecn); + + fifo->level.value = rd32(fbd, FBNIC_RXB_PBUF_FIFO_LEVEL(i)); +} + +static void fbnic_get_rxb_fifo_stats(struct fbnic_dev *fbd, int i, + struct fbnic_rxb_fifo_stats *fifo) +{ + fbnic_hw_stat_rd64(fbd, FBNIC_RXB_DROP_BYTES_STS_L(i), 1, + &fifo->drop.bytes); + fbnic_hw_stat_rd64(fbd, FBNIC_RXB_TRUN_BYTES_STS_L(i), 1, + &fifo->trunc.bytes); + + fbnic_get_rxb_fifo_stats32(fbd, i, fifo); +} + +static void fbnic_get_rxb_enq_stats32(struct fbnic_dev *fbd, int i, + struct fbnic_rxb_enqueue_stats *enq) +{ + fbnic_hw_stat_rd32(fbd, FBNIC_RXB_DRBO_FRM_CNT_SRC(i), + &enq->drbo.frames); + + fbnic_hw_stat_rd32(fbd, FBNIC_RXB_INTEGRITY_ERR(i), + &enq->integrity_err); + fbnic_hw_stat_rd32(fbd, FBNIC_RXB_MAC_ERR(i), + &enq->mac_err); + fbnic_hw_stat_rd32(fbd, FBNIC_RXB_PARSER_ERR(i), + &enq->parser_err); + fbnic_hw_stat_rd32(fbd, FBNIC_RXB_FRM_ERR(i), + &enq->frm_err); +} + +static void fbnic_get_rxb_enq_stats(struct fbnic_dev *fbd, int i, + struct fbnic_rxb_enqueue_stats *enq) +{ + fbnic_hw_stat_rd64(fbd, FBNIC_RXB_DRBO_BYTE_CNT_SRC_L(i), 4, + &enq->drbo.bytes); + + fbnic_get_rxb_enq_stats32(fbd, i, enq); +} + +static void fbnic_get_rxb_deq_stats32(struct fbnic_dev *fbd, int i, + struct fbnic_rxb_dequeue_stats *deq) +{ + fbnic_hw_stat_rd32(fbd, FBNIC_RXB_INTF_FRM_CNT_DST(i), + &deq->intf.frames); + fbnic_hw_stat_rd32(fbd, FBNIC_RXB_PBUF_FRM_CNT_DST(i), + &deq->pbuf.frames); +} + +static void fbnic_get_rxb_deq_stats(struct fbnic_dev *fbd, int i, + struct fbnic_rxb_dequeue_stats *deq) +{ + fbnic_hw_stat_rd64(fbd, FBNIC_RXB_INTF_BYTE_CNT_DST_L(i), 4, + &deq->intf.bytes); + fbnic_hw_stat_rd64(fbd, 
FBNIC_RXB_PBUF_BYTE_CNT_DST_L(i), 4, + &deq->pbuf.bytes); + + fbnic_get_rxb_deq_stats32(fbd, i, deq); +} + +static void fbnic_get_rxb_stats32(struct fbnic_dev *fbd, + struct fbnic_rxb_stats *rxb) +{ + int i; + + for (i = 0; i < FBNIC_RXB_FIFO_INDICES; i++) + fbnic_get_rxb_fifo_stats32(fbd, i, &rxb->fifo[i]); + + for (i = 0; i < FBNIC_RXB_INTF_INDICES; i++) { + fbnic_get_rxb_enq_stats32(fbd, i, &rxb->enq[i]); + fbnic_get_rxb_deq_stats32(fbd, i, &rxb->deq[i]); + } +} + +static void fbnic_get_rxb_stats(struct fbnic_dev *fbd, + struct fbnic_rxb_stats *rxb) +{ + int i; + + for (i = 0; i < FBNIC_RXB_FIFO_INDICES; i++) + fbnic_get_rxb_fifo_stats(fbd, i, &rxb->fifo[i]); + + for (i = 0; i < FBNIC_RXB_INTF_INDICES; i++) { + fbnic_get_rxb_enq_stats(fbd, i, &rxb->enq[i]); + fbnic_get_rxb_deq_stats(fbd, i, &rxb->deq[i]); + } +} + +static void fbnic_reset_hw_rxq_stats(struct fbnic_dev *fbd, + struct fbnic_hw_q_stats *hw_q) +{ + int i; + + for (i = 0; i < fbd->max_num_queues; i++, hw_q++) { + u32 base = FBNIC_QUEUE(i); + + fbnic_hw_stat_rst32(fbd, + base + FBNIC_QUEUE_RDE_PKT_ERR_CNT, + &hw_q->rde_pkt_err); + fbnic_hw_stat_rst32(fbd, + base + FBNIC_QUEUE_RDE_CQ_DROP_CNT, + &hw_q->rde_pkt_cq_drop); + fbnic_hw_stat_rst32(fbd, + base + FBNIC_QUEUE_RDE_BDQ_DROP_CNT, + &hw_q->rde_pkt_bdq_drop); + } +} + +static void fbnic_get_hw_rxq_stats32(struct fbnic_dev *fbd, + struct fbnic_hw_q_stats *hw_q) +{ + int i; + + for (i = 0; i < fbd->max_num_queues; i++, hw_q++) { + u32 base = FBNIC_QUEUE(i); + + fbnic_hw_stat_rd32(fbd, + base + FBNIC_QUEUE_RDE_PKT_ERR_CNT, + &hw_q->rde_pkt_err); + fbnic_hw_stat_rd32(fbd, + base + FBNIC_QUEUE_RDE_CQ_DROP_CNT, + &hw_q->rde_pkt_cq_drop); + fbnic_hw_stat_rd32(fbd, + base + FBNIC_QUEUE_RDE_BDQ_DROP_CNT, + &hw_q->rde_pkt_bdq_drop); + } +} + +void fbnic_get_hw_q_stats(struct fbnic_dev *fbd, + struct fbnic_hw_q_stats *hw_q) +{ + spin_lock(&fbd->hw_stats_lock); + fbnic_get_hw_rxq_stats32(fbd, hw_q); + spin_unlock(&fbd->hw_stats_lock); +} + static void fbnic_reset_pcie_stats_asic(struct fbnic_dev *fbd, struct fbnic_pcie_stats *pcie) { @@ -203,18 +512,40 @@ static void fbnic_get_pcie_stats_asic64(struct fbnic_dev *fbd, void fbnic_reset_hw_stats(struct fbnic_dev *fbd) { + spin_lock(&fbd->hw_stats_lock); + fbnic_reset_tmi_stats(fbd, &fbd->hw_stats.tmi); + fbnic_reset_tti_stats(fbd, &fbd->hw_stats.tti); fbnic_reset_rpc_stats(fbd, &fbd->hw_stats.rpc); + fbnic_reset_rxb_stats(fbd, &fbd->hw_stats.rxb); + fbnic_reset_hw_rxq_stats(fbd, fbd->hw_stats.hw_q); fbnic_reset_pcie_stats_asic(fbd, &fbd->hw_stats.pcie); + spin_unlock(&fbd->hw_stats_lock); } -void fbnic_get_hw_stats32(struct fbnic_dev *fbd) +static void __fbnic_get_hw_stats32(struct fbnic_dev *fbd) { + fbnic_get_tmi_stats32(fbd, &fbd->hw_stats.tmi); + fbnic_get_tti_stats32(fbd, &fbd->hw_stats.tti); fbnic_get_rpc_stats32(fbd, &fbd->hw_stats.rpc); + fbnic_get_rxb_stats32(fbd, &fbd->hw_stats.rxb); + fbnic_get_hw_rxq_stats32(fbd, fbd->hw_stats.hw_q); +} + +void fbnic_get_hw_stats32(struct fbnic_dev *fbd) +{ + spin_lock(&fbd->hw_stats_lock); + __fbnic_get_hw_stats32(fbd); + spin_unlock(&fbd->hw_stats_lock); } void fbnic_get_hw_stats(struct fbnic_dev *fbd) { - fbnic_get_hw_stats32(fbd); + spin_lock(&fbd->hw_stats_lock); + __fbnic_get_hw_stats32(fbd); + fbnic_get_tmi_stats(fbd, &fbd->hw_stats.tmi); + fbnic_get_tti_stats(fbd, &fbd->hw_stats.tti); + fbnic_get_rxb_stats(fbd, &fbd->hw_stats.rxb); fbnic_get_pcie_stats_asic64(fbd, &fbd->hw_stats.pcie); + spin_unlock(&fbd->hw_stats_lock); } diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h 
b/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h index 78df56b87745..07e54bb75bf3 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h +++ b/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h @@ -17,6 +17,11 @@ struct fbnic_stat_counter { bool reported; }; +struct fbnic_hw_stat { + struct fbnic_stat_counter frames; + struct fbnic_stat_counter bytes; +}; + struct fbnic_eth_mac_stats { struct fbnic_stat_counter FramesTransmittedOK; struct fbnic_stat_counter FramesReceivedOK; @@ -37,12 +42,49 @@ struct fbnic_mac_stats { struct fbnic_eth_mac_stats eth_mac; }; +struct fbnic_tmi_stats { + struct fbnic_hw_stat drop; + struct fbnic_stat_counter ptp_illegal_req, ptp_good_ts, ptp_bad_ts; +}; + +struct fbnic_tti_stats { + struct fbnic_hw_stat cm_drop, frame_drop, tbi_drop; +}; + struct fbnic_rpc_stats { struct fbnic_stat_counter unkn_etype, unkn_ext_hdr; struct fbnic_stat_counter ipv4_frag, ipv6_frag, ipv4_esp, ipv6_esp; struct fbnic_stat_counter tcp_opt_err, out_of_hdr_err, ovr_size_err; }; +struct fbnic_rxb_enqueue_stats { + struct fbnic_hw_stat drbo; + struct fbnic_stat_counter integrity_err, mac_err; + struct fbnic_stat_counter parser_err, frm_err; +}; + +struct fbnic_rxb_fifo_stats { + struct fbnic_hw_stat drop, trunc; + struct fbnic_stat_counter trans_drop, trans_ecn; + struct fbnic_stat_counter level; +}; + +struct fbnic_rxb_dequeue_stats { + struct fbnic_hw_stat intf, pbuf; +}; + +struct fbnic_rxb_stats { + struct fbnic_rxb_enqueue_stats enq[FBNIC_RXB_ENQUEUE_INDICES]; + struct fbnic_rxb_fifo_stats fifo[FBNIC_RXB_FIFO_INDICES]; + struct fbnic_rxb_dequeue_stats deq[FBNIC_RXB_DEQUEUE_INDICES]; +}; + +struct fbnic_hw_q_stats { + struct fbnic_stat_counter rde_pkt_err; + struct fbnic_stat_counter rde_pkt_cq_drop; + struct fbnic_stat_counter rde_pkt_bdq_drop; +}; + struct fbnic_pcie_stats { struct fbnic_stat_counter ob_rd_tlp, ob_rd_dword; struct fbnic_stat_counter ob_wr_tlp, ob_wr_dword; @@ -55,13 +97,19 @@ struct fbnic_pcie_stats { struct fbnic_hw_stats { struct fbnic_mac_stats mac; + struct fbnic_tmi_stats tmi; + struct fbnic_tti_stats tti; struct fbnic_rpc_stats rpc; + struct fbnic_rxb_stats rxb; + struct fbnic_hw_q_stats hw_q[FBNIC_MAX_QUEUES]; struct fbnic_pcie_stats pcie; }; u64 fbnic_stat_rd64(struct fbnic_dev *fbd, u32 reg, u32 offset); void fbnic_reset_hw_stats(struct fbnic_dev *fbd); +void fbnic_get_hw_q_stats(struct fbnic_dev *fbd, + struct fbnic_hw_q_stats *hw_q); void fbnic_get_hw_stats32(struct fbnic_dev *fbd); void fbnic_get_hw_stats(struct fbnic_dev *fbd); diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c index dde4a37116e2..10e108c1fcd0 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c @@ -687,13 +687,10 @@ static int fbnic_mac_get_sensor_asic(struct fbnic_dev *fbd, int id, int err = 0, retries = 5; s32 *sensor; - fw_cmpl = kzalloc(sizeof(*fw_cmpl), GFP_KERNEL); + fw_cmpl = fbnic_fw_alloc_cmpl(FBNIC_TLV_MSG_ID_TSENE_READ_RESP); if (!fw_cmpl) return -ENOMEM; - /* Initialize completion and queue it for FW to process */ - fbnic_fw_init_cmpl(fw_cmpl, FBNIC_TLV_MSG_ID_TSENE_READ_RESP); - switch (id) { case FBNIC_SENSOR_TEMP: sensor = &fw_cmpl->u.tsene.millidegrees; @@ -744,7 +741,7 @@ static int fbnic_mac_get_sensor_asic(struct fbnic_dev *fbd, int id, *val = *sensor; exit_cleanup: - fbnic_fw_clear_compl(fbd); + fbnic_fw_clear_cmpl(fbd, fw_cmpl); exit_free: fbnic_fw_put_cmpl(fw_cmpl); diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c 
b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c index 2524d9b88d59..aa812c63d5af 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c @@ -404,12 +404,16 @@ static int fbnic_hwtstamp_set(struct net_device *netdev, static void fbnic_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64) { + u64 rx_bytes, rx_packets, rx_dropped = 0, rx_errors = 0; u64 tx_bytes, tx_packets, tx_dropped = 0; - u64 rx_bytes, rx_packets, rx_dropped = 0; struct fbnic_net *fbn = netdev_priv(dev); + struct fbnic_dev *fbd = fbn->fbd; struct fbnic_queue_stats *stats; + u64 rx_over = 0, rx_missed = 0; unsigned int start, i; + fbnic_get_hw_stats(fbd); + stats = &fbn->tx_stats; tx_bytes = stats->bytes; @@ -420,6 +424,12 @@ static void fbnic_get_stats64(struct net_device *dev, stats64->tx_packets = tx_packets; stats64->tx_dropped = tx_dropped; + /* Record drops from Tx HW Datapath */ + tx_dropped += fbd->hw_stats.tmi.drop.frames.value + + fbd->hw_stats.tti.cm_drop.frames.value + + fbd->hw_stats.tti.frame_drop.frames.value + + fbd->hw_stats.tti.tbi_drop.frames.value; + for (i = 0; i < fbn->num_tx_queues; i++) { struct fbnic_ring *txr = fbn->tx[i]; @@ -445,9 +455,34 @@ static void fbnic_get_stats64(struct net_device *dev, rx_packets = stats->packets; rx_dropped = stats->dropped; + spin_lock(&fbd->hw_stats_lock); + /* Record drops for the host FIFOs. + * 4: network to Host, 6: BMC to Host + * Exclude the BMC and MC FIFOs as those stats may contain drops + * due to unrelated items such as TCAM misses. They are still + * accessible through the ethtool stats. + */ + i = FBNIC_RXB_FIFO_HOST; + rx_missed += fbd->hw_stats.rxb.fifo[i].drop.frames.value; + i = FBNIC_RXB_FIFO_BMC_TO_HOST; + rx_missed += fbd->hw_stats.rxb.fifo[i].drop.frames.value; + + for (i = 0; i < fbd->max_num_queues; i++) { + /* Report packets dropped due to CQ/BDQ being full/empty */ + rx_over += fbd->hw_stats.hw_q[i].rde_pkt_cq_drop.value; + rx_over += fbd->hw_stats.hw_q[i].rde_pkt_bdq_drop.value; + + /* Report packets with errors */ + rx_errors += fbd->hw_stats.hw_q[i].rde_pkt_err.value; + } + spin_unlock(&fbd->hw_stats_lock); + stats64->rx_bytes = rx_bytes; stats64->rx_packets = rx_packets; stats64->rx_dropped = rx_dropped; + stats64->rx_over_errors = rx_over; + stats64->rx_errors = rx_errors; + stats64->rx_missed_errors = rx_missed; for (i = 0; i < fbn->num_rx_queues; i++) { struct fbnic_ring *rxr = fbn->rx[i]; @@ -487,6 +522,7 @@ static void fbnic_get_queue_stats_rx(struct net_device *dev, int idx, { struct fbnic_net *fbn = netdev_priv(dev); struct fbnic_ring *rxr = fbn->rx[idx]; + struct fbnic_dev *fbd = fbn->fbd; struct fbnic_queue_stats *stats; u64 bytes, packets, alloc_fail; u64 csum_complete, csum_none; @@ -510,6 +546,15 @@ static void fbnic_get_queue_stats_rx(struct net_device *dev, int idx, rx->alloc_fail = alloc_fail; rx->csum_complete = csum_complete; rx->csum_none = csum_none; + + fbnic_get_hw_q_stats(fbd, fbd->hw_stats.hw_q); + + spin_lock(&fbd->hw_stats_lock); + rx->hw_drop_overruns = fbd->hw_stats.hw_q[idx].rde_pkt_cq_drop.value + + fbd->hw_stats.hw_q[idx].rde_pkt_bdq_drop.value; + rx->hw_drops = fbd->hw_stats.hw_q[idx].rde_pkt_err.value + + rx->hw_drop_overruns; + spin_unlock(&fbd->hw_stats_lock); } static void fbnic_get_queue_stats_tx(struct net_device *dev, int idx, diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c index 4e8595239c0f..249d3ef862d5 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c +++ 
b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c @@ -6,6 +6,7 @@ #include <linux/pci.h> #include <linux/rtnetlink.h> #include <linux/types.h> +#include <net/devlink.h> #include "fbnic.h" #include "fbnic_drvinfo.h" @@ -292,6 +293,7 @@ static int fbnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) fbnic_devlink_register(fbd); fbnic_dbg_fbd_init(fbd); + spin_lock_init(&fbd->hw_stats_lock); /* Capture snapshot of hardware stats so netdev can calculate delta */ fbnic_reset_hw_stats(fbd); @@ -387,8 +389,12 @@ static int fbnic_pm_suspend(struct device *dev) rtnl_unlock(); null_uc_addr: + devl_lock(priv_to_devlink(fbd)); + fbnic_fw_free_mbx(fbd); + devl_unlock(priv_to_devlink(fbd)); + /* Free the IRQs so they aren't trying to occupy sleeping CPUs */ fbnic_free_irqs(fbd); @@ -419,11 +425,15 @@ static int __fbnic_pm_resume(struct device *dev) fbd->mac->init_regs(fbd); + devl_lock(priv_to_devlink(fbd)); + /* Re-enable mailbox */ err = fbnic_fw_request_mbx(fbd); if (err) goto err_free_irqs; + devl_unlock(priv_to_devlink(fbd)); + /* No netdev means there isn't a network interface to bring up */ if (fbnic_init_failure(fbd)) return 0; diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c index 1459acfb1e61..64a3b953cc17 100644 --- a/drivers/net/ethernet/microchip/lan743x_ethtool.c +++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c @@ -18,6 +18,8 @@ #define EEPROM_MAC_OFFSET (0x01) #define MAX_EEPROM_SIZE (512) #define MAX_OTP_SIZE (1024) +#define MAX_HS_OTP_SIZE (8 * 1024) +#define MAX_HS_EEPROM_SIZE (64 * 1024) #define OTP_INDICATOR_1 (0xF3) #define OTP_INDICATOR_2 (0xF7) @@ -272,6 +274,9 @@ static int lan743x_hs_otp_read(struct lan743x_adapter *adapter, u32 offset, int ret; int i; + if (offset + length > MAX_HS_OTP_SIZE) + return -EINVAL; + ret = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT); if (ret < 0) return ret; @@ -320,6 +325,9 @@ static int lan743x_hs_otp_write(struct lan743x_adapter *adapter, u32 offset, int ret; int i; + if (offset + length > MAX_HS_OTP_SIZE) + return -EINVAL; + ret = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT); if (ret < 0) return ret; @@ -497,6 +505,9 @@ static int lan743x_hs_eeprom_read(struct lan743x_adapter *adapter, u32 val; int i; + if (offset + length > MAX_HS_EEPROM_SIZE) + return -EINVAL; + retval = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT); if (retval < 0) return retval; @@ -539,6 +550,9 @@ static int lan743x_hs_eeprom_write(struct lan743x_adapter *adapter, u32 val; int i; + if (offset + length > MAX_HS_EEPROM_SIZE) + return -EINVAL; + retval = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT); if (retval < 0) return retval; @@ -604,9 +618,9 @@ static int lan743x_ethtool_get_eeprom_len(struct net_device *netdev) struct lan743x_adapter *adapter = netdev_priv(netdev); if (adapter->flags & LAN743X_ADAPTER_FLAG_OTP) - return MAX_OTP_SIZE; + return adapter->is_pci11x1x ? MAX_HS_OTP_SIZE : MAX_OTP_SIZE; - return MAX_EEPROM_SIZE; + return adapter->is_pci11x1x ? 
MAX_HS_EEPROM_SIZE : MAX_EEPROM_SIZE; } static int lan743x_ethtool_get_eeprom(struct net_device *netdev, diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index a70b88037a20..9d70b51ca91d 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -1330,7 +1330,7 @@ static int lan743x_mac_set_mtu(struct lan743x_adapter *adapter, int new_mtu) } /* PHY */ -static int lan743x_phy_reset(struct lan743x_adapter *adapter) +static int lan743x_hw_reset_phy(struct lan743x_adapter *adapter) { u32 data; @@ -1346,11 +1346,6 @@ static int lan743x_phy_reset(struct lan743x_adapter *adapter) 50000, 1000000); } -static int lan743x_phy_init(struct lan743x_adapter *adapter) -{ - return lan743x_phy_reset(adapter); -} - static void lan743x_phy_interface_select(struct lan743x_adapter *adapter) { u32 id_rev; @@ -1729,6 +1724,7 @@ int lan743x_rx_set_tstamp_mode(struct lan743x_adapter *adapter, default: return -ERANGE; } + adapter->rx_tstamp_filter = rx_filter; return 0; } @@ -2499,8 +2495,7 @@ static int lan743x_rx_process_buffer(struct lan743x_rx *rx) /* save existing skb, allocate new skb and map to dma */ skb = buffer_info->skb; - if (lan743x_rx_init_ring_element(rx, rx->last_head, - GFP_ATOMIC | GFP_DMA)) { + if (lan743x_rx_init_ring_element(rx, rx->last_head, GFP_ATOMIC)) { /* failed to allocate next skb. * Memory is very low. * Drop this packet and reuse buffer. @@ -3352,8 +3347,6 @@ static int lan743x_netdev_ioctl(struct net_device *netdev, if (!netif_running(netdev)) return -EINVAL; - if (cmd == SIOCSHWTSTAMP) - return lan743x_ptp_ioctl(netdev, ifr, cmd); return phylink_mii_ioctl(adapter->phylink, ifr, cmd); } @@ -3448,6 +3441,8 @@ static const struct net_device_ops lan743x_netdev_ops = { .ndo_change_mtu = lan743x_netdev_change_mtu, .ndo_get_stats64 = lan743x_netdev_get_stats64, .ndo_set_mac_address = lan743x_netdev_set_mac_address, + .ndo_hwtstamp_get = lan743x_ptp_hwtstamp_get, + .ndo_hwtstamp_set = lan743x_ptp_hwtstamp_set, }; static void lan743x_hardware_cleanup(struct lan743x_adapter *adapter) @@ -3534,10 +3529,6 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter, if (ret) return ret; - ret = lan743x_phy_init(adapter); - if (ret) - return ret; - ret = lan743x_ptp_init(adapter); if (ret) return ret; @@ -3674,6 +3665,10 @@ static int lan743x_pcidev_probe(struct pci_dev *pdev, if (ret) goto cleanup_pci; + ret = lan743x_hw_reset_phy(adapter); + if (ret) + goto cleanup_pci; + ret = lan743x_hardware_init(adapter, pdev); if (ret) goto cleanup_pci; diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h index db5fc73e41cc..02a28b709163 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.h +++ b/drivers/net/ethernet/microchip/lan743x_main.h @@ -1087,6 +1087,7 @@ struct lan743x_adapter { phy_interface_t phy_interface; struct phylink *phylink; struct phylink_config phylink_config; + int rx_tstamp_filter; }; #define LAN743X_COMPONENT_FLAG_RX(channel) BIT(20 + (channel)) diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c index 0be44dcb3393..a3b48388b3fd 100644 --- a/drivers/net/ethernet/microchip/lan743x_ptp.c +++ b/drivers/net/ethernet/microchip/lan743x_ptp.c @@ -463,10 +463,6 @@ static int lan743x_ptp_perout(struct lan743x_adapter *adapter, int on, struct lan743x_ptp_perout *perout = &ptp->perout[index]; int ret = 0; - /* Reject requests with unsupported flags */ - if 
(perout_request->flags & ~PTP_PEROUT_DUTY_CYCLE) - return -EOPNOTSUPP; - if (on) { perout_pin = ptp_find_pin(ptp->ptp_clock, PTP_PF_PEROUT, perout_request->index); @@ -942,12 +938,6 @@ static int lan743x_ptp_io_extts(struct lan743x_adapter *adapter, int on, extts = &ptp->extts[index]; - if (extts_request->flags & ~(PTP_ENABLE_FEATURE | - PTP_RISING_EDGE | - PTP_FALLING_EDGE | - PTP_STRICT_FLAGS)) - return -EOPNOTSUPP; - if (on) { extts_pin = ptp_find_pin(ptp->ptp_clock, PTP_PF_EXTTS, index); if (extts_pin < 0) @@ -1543,6 +1533,10 @@ int lan743x_ptp_open(struct lan743x_adapter *adapter) ptp->ptp_clock_info.n_per_out = LAN743X_PTP_N_EVENT_CHAN; ptp->ptp_clock_info.n_pins = n_pins; ptp->ptp_clock_info.pps = LAN743X_PTP_N_PPS; + ptp->ptp_clock_info.supported_extts_flags = PTP_RISING_EDGE | + PTP_FALLING_EDGE | + PTP_STRICT_FLAGS; + ptp->ptp_clock_info.supported_perout_flags = PTP_PEROUT_DUTY_CYCLE; ptp->ptp_clock_info.pin_config = ptp->pin_config; ptp->ptp_clock_info.adjfine = lan743x_ptpci_adjfine; ptp->ptp_clock_info.adjtime = lan743x_ptpci_adjtime; @@ -1742,23 +1736,32 @@ void lan743x_ptp_tx_timestamp_skb(struct lan743x_adapter *adapter, lan743x_ptp_tx_ts_complete(adapter); } -int lan743x_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +int lan743x_ptp_hwtstamp_get(struct net_device *netdev, + struct kernel_hwtstamp_config *config) { struct lan743x_adapter *adapter = netdev_priv(netdev); - struct hwtstamp_config config; - int ret = 0; - int index; + struct lan743x_tx *tx = &adapter->tx[0]; - if (!ifr) { - netif_err(adapter, drv, adapter->netdev, - "SIOCSHWTSTAMP, ifr == NULL\n"); - return -EINVAL; - } + if (tx->ts_flags & TX_TS_FLAG_ONE_STEP_SYNC) + config->tx_type = HWTSTAMP_TX_ONESTEP_SYNC; + else if (tx->ts_flags & TX_TS_FLAG_TIMESTAMPING_ENABLED) + config->tx_type = HWTSTAMP_TX_ON; + else + config->tx_type = HWTSTAMP_TX_OFF; - if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) - return -EFAULT; + config->rx_filter = adapter->rx_tstamp_filter; - switch (config.tx_type) { + return 0; +} + +int lan743x_ptp_hwtstamp_set(struct net_device *netdev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + int index; + + switch (config->tx_type) { case HWTSTAMP_TX_OFF: for (index = 0; index < adapter->used_tx_channels; index++) @@ -1782,19 +1785,12 @@ int lan743x_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) lan743x_ptp_set_sync_ts_insert(adapter, true); break; case HWTSTAMP_TX_ONESTEP_P2P: - ret = -ERANGE; - break; + return -ERANGE; default: netif_warn(adapter, drv, adapter->netdev, - " tx_type = %d, UNKNOWN\n", config.tx_type); - ret = -EINVAL; - break; + " tx_type = %d, UNKNOWN\n", config->tx_type); + return -EINVAL; } - ret = lan743x_rx_set_tstamp_mode(adapter, config.rx_filter); - - if (!ret) - return copy_to_user(ifr->ifr_data, &config, - sizeof(config)) ? 
-EFAULT : 0; - return ret; + return lan743x_rx_set_tstamp_mode(adapter, config->rx_filter); } diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.h b/drivers/net/ethernet/microchip/lan743x_ptp.h index 0d29914cd460..e8d073bfa2ca 100644 --- a/drivers/net/ethernet/microchip/lan743x_ptp.h +++ b/drivers/net/ethernet/microchip/lan743x_ptp.h @@ -51,8 +51,11 @@ int lan743x_ptp_open(struct lan743x_adapter *adapter); void lan743x_ptp_close(struct lan743x_adapter *adapter); void lan743x_ptp_update_latency(struct lan743x_adapter *adapter, u32 link_speed); - -int lan743x_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); +int lan743x_ptp_hwtstamp_get(struct net_device *netdev, + struct kernel_hwtstamp_config *config); +int lan743x_ptp_hwtstamp_set(struct net_device *netdev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack); #define LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS (4) diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c index 0af143ec0f86..427bdc0e4908 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c @@ -353,6 +353,11 @@ static void lan966x_ifh_set_rew_op(void *ifh, u64 rew_op) lan966x_ifh_set(ifh, rew_op, IFH_POS_REW_CMD, IFH_WID_REW_CMD); } +static void lan966x_ifh_set_oam_type(void *ifh, u64 oam_type) +{ + lan966x_ifh_set(ifh, oam_type, IFH_POS_PDU_TYPE, IFH_WID_PDU_TYPE); +} + static void lan966x_ifh_set_timestamp(void *ifh, u64 timestamp) { lan966x_ifh_set(ifh, timestamp, IFH_POS_TIMESTAMP, IFH_WID_TIMESTAMP); @@ -380,6 +385,7 @@ static netdev_tx_t lan966x_port_xmit(struct sk_buff *skb, return err; lan966x_ifh_set_rew_op(ifh, LAN966X_SKB_CB(skb)->rew_op); + lan966x_ifh_set_oam_type(ifh, LAN966X_SKB_CB(skb)->pdu_type); lan966x_ifh_set_timestamp(ifh, LAN966X_SKB_CB(skb)->ts_id); } diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h index 1efa584e7107..1f9df67f0504 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h @@ -75,6 +75,10 @@ #define IFH_REW_OP_ONE_STEP_PTP 0x3 #define IFH_REW_OP_TWO_STEP_PTP 0x4 +#define IFH_PDU_TYPE_NONE 0 +#define IFH_PDU_TYPE_IPV4 7 +#define IFH_PDU_TYPE_IPV6 8 + #define FDMA_RX_DCB_MAX_DBS 1 #define FDMA_TX_DCB_MAX_DBS 1 @@ -254,6 +258,7 @@ struct lan966x_phc { struct lan966x_skb_cb { u8 rew_op; + u8 pdu_type; u16 ts_id; unsigned long jiffies; }; diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c index 63905bb5a63a..b4377b8613c3 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c @@ -322,34 +322,55 @@ void lan966x_ptp_hwtstamp_get(struct lan966x_port *port, *cfg = phc->hwtstamp_config; } -static int lan966x_ptp_classify(struct lan966x_port *port, struct sk_buff *skb) +static void lan966x_ptp_classify(struct lan966x_port *port, struct sk_buff *skb, + u8 *rew_op, u8 *pdu_type) { struct ptp_header *header; u8 msgtype; int type; - if (port->ptp_tx_cmd == IFH_REW_OP_NOOP) - return IFH_REW_OP_NOOP; + if (port->ptp_tx_cmd == IFH_REW_OP_NOOP) { + *rew_op = IFH_REW_OP_NOOP; + *pdu_type = IFH_PDU_TYPE_NONE; + return; + } type = ptp_classify_raw(skb); - if (type == PTP_CLASS_NONE) - return IFH_REW_OP_NOOP; + if (type == PTP_CLASS_NONE) { + *rew_op = IFH_REW_OP_NOOP; + *pdu_type = IFH_PDU_TYPE_NONE; + 
return; + } header = ptp_parse_header(skb, type); - if (!header) - return IFH_REW_OP_NOOP; + if (!header) { + *rew_op = IFH_REW_OP_NOOP; + *pdu_type = IFH_PDU_TYPE_NONE; + return; + } - if (port->ptp_tx_cmd == IFH_REW_OP_TWO_STEP_PTP) - return IFH_REW_OP_TWO_STEP_PTP; + if (type & PTP_CLASS_L2) + *pdu_type = IFH_PDU_TYPE_NONE; + if (type & PTP_CLASS_IPV4) + *pdu_type = IFH_PDU_TYPE_IPV4; + if (type & PTP_CLASS_IPV6) + *pdu_type = IFH_PDU_TYPE_IPV6; + + if (port->ptp_tx_cmd == IFH_REW_OP_TWO_STEP_PTP) { + *rew_op = IFH_REW_OP_TWO_STEP_PTP; + return; + } /* If it is sync and run 1 step then set the correct operation, * otherwise run as 2 step */ msgtype = ptp_get_msgtype(header, type); - if ((msgtype & 0xf) == 0) - return IFH_REW_OP_ONE_STEP_PTP; + if ((msgtype & 0xf) == 0) { + *rew_op = IFH_REW_OP_ONE_STEP_PTP; + return; + } - return IFH_REW_OP_TWO_STEP_PTP; + *rew_op = IFH_REW_OP_TWO_STEP_PTP; } static void lan966x_ptp_txtstamp_old_release(struct lan966x_port *port) @@ -374,10 +395,12 @@ int lan966x_ptp_txtstamp_request(struct lan966x_port *port, { struct lan966x *lan966x = port->lan966x; unsigned long flags; + u8 pdu_type; u8 rew_op; - rew_op = lan966x_ptp_classify(port, skb); + lan966x_ptp_classify(port, skb, &rew_op, &pdu_type); LAN966X_SKB_CB(skb)->rew_op = rew_op; + LAN966X_SKB_CB(skb)->pdu_type = pdu_type; if (rew_op != IFH_REW_OP_TWO_STEP_PTP) return 0; @@ -815,10 +838,6 @@ static int lan966x_ptp_perout(struct ptp_clock_info *ptp, bool pps = false; int pin; - if (rq->perout.flags & ~(PTP_PEROUT_DUTY_CYCLE | - PTP_PEROUT_PHASE)) - return -EOPNOTSUPP; - pin = ptp_find_pin(phc->clock, PTP_PF_PEROUT, rq->perout.index); if (pin == -1 || pin >= LAN966X_PHC_PINS_NUM) return -EINVAL; @@ -917,12 +936,6 @@ static int lan966x_ptp_extts(struct ptp_clock_info *ptp, if (lan966x->ptp_ext_irq <= 0) return -EOPNOTSUPP; - /* Reject requests with unsupported flags */ - if (rq->extts.flags & ~(PTP_ENABLE_FEATURE | - PTP_RISING_EDGE | - PTP_STRICT_FLAGS)) - return -EOPNOTSUPP; - pin = ptp_find_pin(phc->clock, PTP_PF_EXTTS, rq->extts.index); if (pin == -1 || pin >= LAN966X_PHC_PINS_NUM) return -EINVAL; @@ -978,6 +991,10 @@ static struct ptp_clock_info lan966x_ptp_clock_info = { .n_per_out = LAN966X_PHC_PINS_NUM, .n_ext_ts = LAN966X_PHC_PINS_NUM, .n_pins = LAN966X_PHC_PINS_NUM, + .supported_extts_flags = PTP_RISING_EDGE | + PTP_STRICT_FLAGS, + .supported_perout_flags = PTP_PEROUT_DUTY_CYCLE | + PTP_PEROUT_PHASE, }; static int lan966x_ptp_phc_init(struct lan966x *lan966x, diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c index 4ffaf7588885..3504507477c6 100644 --- a/drivers/net/ethernet/microsoft/mana/gdma_main.c +++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c @@ -391,6 +391,7 @@ static void mana_gd_process_eqe(struct gdma_queue *eq) case GDMA_EQE_HWC_INIT_EQ_ID_DB: case GDMA_EQE_HWC_INIT_DATA: case GDMA_EQE_HWC_INIT_DONE: + case GDMA_EQE_HWC_SOC_SERVICE: case GDMA_EQE_RNIC_QP_FATAL: if (!eq->eq.callback) break; @@ -964,6 +965,7 @@ int mana_gd_verify_vf_version(struct pci_dev *pdev) err, resp.hdr.status); return err ? 
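Editorial aside, not part of the patch: the lan743x, lan966x and ocelot hunks in this series all drop their open-coded "reject requests with unsupported flags" checks and instead populate supported_extts_flags / supported_perout_flags in ptp_clock_info. The assumption is that flag validation now happens centrally in the PTP core before the driver's enable callback is invoked, roughly along the lines below; the function name is hypothetical and only illustrates the check the core is expected to perform.

/* Illustrative core-side validation against the driver-declared flag mask */
static int example_validate_perout(const struct ptp_clock_info *info,
				   const struct ptp_perout_request *req)
{
	/* Any flag the driver did not declare is rejected up front */
	if (req->flags & ~info->supported_perout_flags)
		return -EOPNOTSUPP;

	return 0;
}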
err : -EPROTO; } + gc->pf_cap_flags1 = resp.pf_cap_flags1; if (resp.pf_cap_flags1 & GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG) { err = mana_gd_query_hwc_timeout(pdev, &hwc->hwc_timeout); if (err) { @@ -1004,7 +1006,6 @@ int mana_gd_register_device(struct gdma_dev *gd) return 0; } -EXPORT_SYMBOL_NS(mana_gd_register_device, "NET_MANA"); int mana_gd_deregister_device(struct gdma_dev *gd) { @@ -1035,7 +1036,6 @@ int mana_gd_deregister_device(struct gdma_dev *gd) return err; } -EXPORT_SYMBOL_NS(mana_gd_deregister_device, "NET_MANA"); u32 mana_gd_wq_avail_space(struct gdma_queue *wq) { @@ -1469,10 +1469,14 @@ static int mana_gd_setup(struct pci_dev *pdev) mana_gd_init_registers(pdev); mana_smc_init(&gc->shm_channel, gc->dev, gc->shm_base); + gc->service_wq = alloc_ordered_workqueue("gdma_service_wq", 0); + if (!gc->service_wq) + return -ENOMEM; + err = mana_gd_setup_irqs(pdev); if (err) { dev_err(gc->dev, "Failed to setup IRQs: %d\n", err); - return err; + goto free_workqueue; } err = mana_hwc_create_channel(gc); @@ -1498,6 +1502,8 @@ destroy_hwc: mana_hwc_destroy_channel(gc); remove_irq: mana_gd_remove_irqs(pdev); +free_workqueue: + destroy_workqueue(gc->service_wq); dev_err(&pdev->dev, "%s failed (error %d)\n", __func__, err); return err; } @@ -1509,6 +1515,8 @@ static void mana_gd_cleanup(struct pci_dev *pdev) mana_hwc_destroy_channel(gc); mana_gd_remove_irqs(pdev); + + destroy_workqueue(gc->service_wq); dev_dbg(&pdev->dev, "mana gdma cleanup successful\n"); } @@ -1578,8 +1586,14 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto cleanup_gd; + err = mana_rdma_probe(&gc->mana_ib); + if (err) + goto cleanup_mana; + return 0; +cleanup_mana: + mana_remove(&gc->mana, false); cleanup_gd: mana_gd_cleanup(pdev); unmap_bar: @@ -1607,6 +1621,7 @@ static void mana_gd_remove(struct pci_dev *pdev) { struct gdma_context *gc = pci_get_drvdata(pdev); + mana_rdma_remove(&gc->mana_ib); mana_remove(&gc->mana, false); mana_gd_cleanup(pdev); @@ -1630,6 +1645,7 @@ static int mana_gd_suspend(struct pci_dev *pdev, pm_message_t state) { struct gdma_context *gc = pci_get_drvdata(pdev); + mana_rdma_remove(&gc->mana_ib); mana_remove(&gc->mana, true); mana_gd_cleanup(pdev); @@ -1654,6 +1670,10 @@ static int mana_gd_resume(struct pci_dev *pdev) if (err) return err; + err = mana_rdma_probe(&gc->mana_ib); + if (err) + return err; + return 0; } @@ -1664,6 +1684,7 @@ static void mana_gd_shutdown(struct pci_dev *pdev) dev_info(&pdev->dev, "Shutdown was called\n"); + mana_rdma_remove(&gc->mana_ib); mana_remove(&gc->mana, true); mana_gd_cleanup(pdev); diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c index 1ba49602089b..a8c4d8db75a5 100644 --- a/drivers/net/ethernet/microsoft/mana/hw_channel.c +++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c @@ -112,11 +112,13 @@ out: static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self, struct gdma_event *event) { + union hwc_init_soc_service_type service_data; struct hw_channel_context *hwc = ctx; struct gdma_dev *gd = hwc->gdma_dev; union hwc_init_type_data type_data; union hwc_init_eq_id_db eq_db; u32 type, val; + int ret; switch (event->type) { case GDMA_EQE_HWC_INIT_EQ_ID_DB: @@ -199,7 +201,24 @@ static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self, } break; + case GDMA_EQE_HWC_SOC_SERVICE: + service_data.as_uint32 = event->details[0]; + type = service_data.type; + switch (type) { + case GDMA_SERVICE_TYPE_RDMA_SUSPEND: + case 
GDMA_SERVICE_TYPE_RDMA_RESUME: + ret = mana_rdma_service_event(gd->gdma_context, type); + if (ret) + dev_err(hwc->dev, "Failed to schedule adev service event: %d\n", + ret); + break; + default: + dev_warn(hwc->dev, "Received unknown SOC service type %u\n", type); + break; + } + + break; default: dev_warn(hwc->dev, "Received unknown gdma event %u\n", event->type); /* Ignore unknown events, which should never happen. */ diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c index 2bac6be8f6a0..ccd2885c939e 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_en.c +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c @@ -921,7 +921,7 @@ static void mana_pf_deregister_filter(struct mana_port_context *apc) static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver, u32 proto_minor_ver, u32 proto_micro_ver, - u16 *max_num_vports) + u16 *max_num_vports, u8 *bm_hostmode) { struct gdma_context *gc = ac->gdma_dev->gdma_context; struct mana_query_device_cfg_resp resp = {}; @@ -932,7 +932,7 @@ static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver, mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG, sizeof(req), sizeof(resp)); - req.hdr.resp.msg_version = GDMA_MESSAGE_V2; + req.hdr.resp.msg_version = GDMA_MESSAGE_V3; req.proto_major_ver = proto_major_ver; req.proto_minor_ver = proto_minor_ver; @@ -956,11 +956,16 @@ static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver, *max_num_vports = resp.max_num_vports; - if (resp.hdr.response.msg_version == GDMA_MESSAGE_V2) + if (resp.hdr.response.msg_version >= GDMA_MESSAGE_V2) gc->adapter_mtu = resp.adapter_mtu; else gc->adapter_mtu = ETH_FRAME_LEN; + if (resp.hdr.response.msg_version >= GDMA_MESSAGE_V3) + *bm_hostmode = resp.bm_hostmode; + else + *bm_hostmode = 0; + debugfs_create_u16("adapter-MTU", 0400, gc->mana_pci_debugfs, &gc->adapter_mtu); return 0; @@ -2441,7 +2446,7 @@ static void mana_destroy_vport(struct mana_port_context *apc) mana_destroy_txq(apc); mana_uncfg_vport(apc); - if (gd->gdma_context->is_pf) + if (gd->gdma_context->is_pf && !apc->ac->bm_hostmode) mana_pf_deregister_hw_vport(apc); } @@ -2453,7 +2458,7 @@ static int mana_create_vport(struct mana_port_context *apc, apc->default_rxobj = INVALID_MANA_HANDLE; - if (gd->gdma_context->is_pf) { + if (gd->gdma_context->is_pf && !apc->ac->bm_hostmode) { err = mana_pf_register_hw_vport(apc); if (err) return err; @@ -2689,7 +2694,7 @@ int mana_alloc_queues(struct net_device *ndev) goto destroy_vport; } - if (gd->gdma_context->is_pf) { + if (gd->gdma_context->is_pf && !apc->ac->bm_hostmode) { err = mana_pf_register_filter(apc); if (err) goto destroy_vport; @@ -2751,7 +2756,7 @@ static int mana_dealloc_queues(struct net_device *ndev) mana_chn_setxdp(apc, NULL); - if (gd->gdma_context->is_pf) + if (gd->gdma_context->is_pf && !apc->ac->bm_hostmode) mana_pf_deregister_filter(apc); /* No packet can be transmitted now since apc->port_is_up is false. 
@@ -2945,7 +2950,7 @@ static void remove_adev(struct gdma_dev *gd) gd->adev = NULL; } -static int add_adev(struct gdma_dev *gd) +static int add_adev(struct gdma_dev *gd, const char *name) { struct auxiliary_device *adev; struct mana_adev *madev; @@ -2961,7 +2966,7 @@ static int add_adev(struct gdma_dev *gd) goto idx_fail; adev->id = ret; - adev->name = "rdma"; + adev->name = name; adev->dev.parent = gd->gdma_context->dev; adev->dev.release = adev_release; madev->mdev = gd; @@ -2993,11 +2998,76 @@ idx_fail: return ret; } +static void mana_rdma_service_handle(struct work_struct *work) +{ + struct mana_service_work *serv_work = + container_of(work, struct mana_service_work, work); + struct gdma_dev *gd = serv_work->gdma_dev; + struct device *dev = gd->gdma_context->dev; + int ret; + + if (READ_ONCE(gd->rdma_teardown)) + goto out; + + switch (serv_work->event) { + case GDMA_SERVICE_TYPE_RDMA_SUSPEND: + if (!gd->adev || gd->is_suspended) + break; + + remove_adev(gd); + gd->is_suspended = true; + break; + + case GDMA_SERVICE_TYPE_RDMA_RESUME: + if (!gd->is_suspended) + break; + + ret = add_adev(gd, "rdma"); + if (ret) + dev_err(dev, "Failed to add adev on resume: %d\n", ret); + else + gd->is_suspended = false; + break; + + default: + dev_warn(dev, "unknown adev service event %u\n", + serv_work->event); + break; + } + +out: + kfree(serv_work); +} + +int mana_rdma_service_event(struct gdma_context *gc, enum gdma_service_type event) +{ + struct gdma_dev *gd = &gc->mana_ib; + struct mana_service_work *serv_work; + + if (gd->dev_id.type != GDMA_DEVICE_MANA_IB) { + /* RDMA device is not detected on pci */ + return 0; + } + + serv_work = kzalloc(sizeof(*serv_work), GFP_ATOMIC); + if (!serv_work) + return -ENOMEM; + + serv_work->event = event; + serv_work->gdma_dev = gd; + + INIT_WORK(&serv_work->work, mana_rdma_service_handle); + queue_work(gc->service_wq, &serv_work->work); + + return 0; +} + int mana_probe(struct gdma_dev *gd, bool resuming) { struct gdma_context *gc = gd->gdma_context; struct mana_context *ac = gd->driver_data; struct device *dev = gc->dev; + u8 bm_hostmode = 0; u16 num_ports = 0; int err; int i; @@ -3026,10 +3096,12 @@ int mana_probe(struct gdma_dev *gd, bool resuming) } err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION, - MANA_MICRO_VERSION, &num_ports); + MANA_MICRO_VERSION, &num_ports, &bm_hostmode); if (err) goto out; + ac->bm_hostmode = bm_hostmode; + if (!resuming) { ac->num_ports = num_ports; } else { @@ -3077,7 +3149,7 @@ int mana_probe(struct gdma_dev *gd, bool resuming) } } - err = add_adev(gd); + err = add_adev(gd, "eth"); out: if (err) { mana_remove(gd, false); @@ -3151,6 +3223,44 @@ out: dev_dbg(dev, "%s succeeded\n", __func__); } +int mana_rdma_probe(struct gdma_dev *gd) +{ + int err = 0; + + if (gd->dev_id.type != GDMA_DEVICE_MANA_IB) { + /* RDMA device is not detected on pci */ + return err; + } + + err = mana_gd_register_device(gd); + if (err) + return err; + + err = add_adev(gd, "rdma"); + if (err) + mana_gd_deregister_device(gd); + + return err; +} + +void mana_rdma_remove(struct gdma_dev *gd) +{ + struct gdma_context *gc = gd->gdma_context; + + if (gd->dev_id.type != GDMA_DEVICE_MANA_IB) { + /* RDMA device is not detected on pci */ + return; + } + + WRITE_ONCE(gd->rdma_teardown, true); + flush_workqueue(gc->service_wq); + + if (gd->adev) + remove_adev(gd); + + mana_gd_deregister_device(gd); +} + struct net_device *mana_get_primary_netdev(struct mana_context *ac, u32 port_index, netdevice_tracker *tracker) diff --git 
a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c index 7663d196eaf8..469784d3a1a6 100644 --- a/drivers/net/ethernet/mscc/ocelot_net.c +++ b/drivers/net/ethernet/mscc/ocelot_net.c @@ -870,23 +870,30 @@ static int ocelot_set_features(struct net_device *dev, static int ocelot_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { + return phy_mii_ioctl(dev->phydev, ifr, cmd); +} + +static int ocelot_port_hwtstamp_get(struct net_device *dev, + struct kernel_hwtstamp_config *cfg) +{ struct ocelot_port_private *priv = netdev_priv(dev); struct ocelot *ocelot = priv->port.ocelot; int port = priv->port.index; - /* If the attached PHY device isn't capable of timestamping operations, - * use our own (when possible). - */ - if (!phy_has_hwtstamp(dev->phydev) && ocelot->ptp) { - switch (cmd) { - case SIOCSHWTSTAMP: - return ocelot_hwstamp_set(ocelot, port, ifr); - case SIOCGHWTSTAMP: - return ocelot_hwstamp_get(ocelot, port, ifr); - } - } + ocelot_hwstamp_get(ocelot, port, cfg); - return phy_mii_ioctl(dev->phydev, ifr, cmd); + return 0; +} + +static int ocelot_port_hwtstamp_set(struct net_device *dev, + struct kernel_hwtstamp_config *cfg, + struct netlink_ext_ack *extack) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot *ocelot = priv->port.ocelot; + int port = priv->port.index; + + return ocelot_hwstamp_set(ocelot, port, cfg, extack); } static int ocelot_change_mtu(struct net_device *dev, int new_mtu) @@ -917,6 +924,8 @@ static const struct net_device_ops ocelot_port_netdev_ops = { .ndo_set_features = ocelot_set_features, .ndo_setup_tc = ocelot_setup_tc, .ndo_eth_ioctl = ocelot_ioctl, + .ndo_hwtstamp_get = ocelot_port_hwtstamp_get, + .ndo_hwtstamp_set = ocelot_port_hwtstamp_set, }; struct net_device *ocelot_port_to_netdev(struct ocelot *ocelot, int port) diff --git a/drivers/net/ethernet/mscc/ocelot_ptp.c b/drivers/net/ethernet/mscc/ocelot_ptp.c index cc1088988da0..88b5422cc2a0 100644 --- a/drivers/net/ethernet/mscc/ocelot_ptp.c +++ b/drivers/net/ethernet/mscc/ocelot_ptp.c @@ -211,11 +211,6 @@ int ocelot_ptp_enable(struct ptp_clock_info *ptp, switch (rq->type) { case PTP_CLK_REQ_PEROUT: - /* Reject requests with unsupported flags */ - if (rq->perout.flags & ~(PTP_PEROUT_DUTY_CYCLE | - PTP_PEROUT_PHASE)) - return -EOPNOTSUPP; - pin = ptp_find_pin(ocelot->ptp_clock, PTP_PF_PEROUT, rq->perout.index); if (pin == 0) @@ -519,47 +514,42 @@ static int ocelot_ptp_tx_type_to_cmd(int tx_type, int *ptp_cmd) return 0; } -int ocelot_hwstamp_get(struct ocelot *ocelot, int port, struct ifreq *ifr) +void ocelot_hwstamp_get(struct ocelot *ocelot, int port, + struct kernel_hwtstamp_config *cfg) { struct ocelot_port *ocelot_port = ocelot->ports[port]; - struct hwtstamp_config cfg = {}; switch (ocelot_port->ptp_cmd) { case IFH_REW_OP_TWO_STEP_PTP: - cfg.tx_type = HWTSTAMP_TX_ON; + cfg->tx_type = HWTSTAMP_TX_ON; break; case IFH_REW_OP_ORIGIN_PTP: - cfg.tx_type = HWTSTAMP_TX_ONESTEP_SYNC; + cfg->tx_type = HWTSTAMP_TX_ONESTEP_SYNC; break; default: - cfg.tx_type = HWTSTAMP_TX_OFF; + cfg->tx_type = HWTSTAMP_TX_OFF; break; } - cfg.rx_filter = ocelot_traps_to_ptp_rx_filter(ocelot_port->trap_proto); - - return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? 
-EFAULT : 0; + cfg->rx_filter = ocelot_traps_to_ptp_rx_filter(ocelot_port->trap_proto); } EXPORT_SYMBOL(ocelot_hwstamp_get); -int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr) +int ocelot_hwstamp_set(struct ocelot *ocelot, int port, + struct kernel_hwtstamp_config *cfg, + struct netlink_ext_ack *extack) { struct ocelot_port *ocelot_port = ocelot->ports[port]; - int ptp_cmd, old_ptp_cmd = ocelot_port->ptp_cmd; bool l2 = false, l4 = false; - struct hwtstamp_config cfg; - bool old_l2, old_l4; + int ptp_cmd; int err; - if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) - return -EFAULT; - /* Tx type sanity check */ - err = ocelot_ptp_tx_type_to_cmd(cfg.tx_type, &ptp_cmd); + err = ocelot_ptp_tx_type_to_cmd(cfg->tx_type, &ptp_cmd); if (err) return err; - switch (cfg.rx_filter) { + switch (cfg->rx_filter) { case HWTSTAMP_FILTER_NONE: break; case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: @@ -582,27 +572,15 @@ int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr) return -ERANGE; } - old_l2 = ocelot_port->trap_proto & OCELOT_PROTO_PTP_L2; - old_l4 = ocelot_port->trap_proto & OCELOT_PROTO_PTP_L4; - err = ocelot_setup_ptp_traps(ocelot, port, l2, l4); if (err) return err; ocelot_port->ptp_cmd = ptp_cmd; - cfg.rx_filter = ocelot_traps_to_ptp_rx_filter(ocelot_port->trap_proto); - - if (copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg))) { - err = -EFAULT; - goto out_restore_ptp_traps; - } + cfg->rx_filter = ocelot_traps_to_ptp_rx_filter(ocelot_port->trap_proto); return 0; -out_restore_ptp_traps: - ocelot_setup_ptp_traps(ocelot, port, old_l2, old_l4); - ocelot_port->ptp_cmd = old_ptp_cmd; - return err; } EXPORT_SYMBOL(ocelot_hwstamp_set); diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c index 055b55651a49..498eec8ae61d 100644 --- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c +++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c @@ -108,6 +108,8 @@ static struct ptp_clock_info ocelot_ptp_clock_info = { .n_ext_ts = 0, .n_per_out = OCELOT_PTP_PINS_NUM, .n_pins = OCELOT_PTP_PINS_NUM, + .supported_perout_flags = PTP_PEROUT_DUTY_CYCLE | + PTP_PEROUT_PHASE, .pps = 0, .gettime64 = ocelot_ptp_gettime64, .settime64 = ocelot_ptp_settime64, diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c index 05606692e631..dd279788cf9e 100644 --- a/drivers/net/ethernet/natsemi/natsemi.c +++ b/drivers/net/ethernet/natsemi/natsemi.c @@ -846,7 +846,7 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent) return -ENOMEM; SET_NETDEV_DEV(dev, &pdev->dev); - i = pci_request_regions(pdev, DRV_NAME); + i = pcim_request_all_regions(pdev, DRV_NAME); if (i) goto err_pci_request_regions; diff --git a/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c b/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c index 671af5d4c5d2..9e7c285eaa6b 100644 --- a/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c +++ b/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c @@ -266,17 +266,17 @@ static void set_sha2_512hmac(struct nfp_ipsec_cfg_add_sa *cfg, int *trunc_len) } } -static int nfp_net_xfrm_add_state(struct xfrm_state *x, +static int nfp_net_xfrm_add_state(struct net_device *dev, + struct xfrm_state *x, struct netlink_ext_ack *extack) { - struct net_device *netdev = x->xso.real_dev; struct nfp_ipsec_cfg_mssg msg = {}; int i, key_len, trunc_len, err = 0; struct nfp_ipsec_cfg_add_sa *cfg; struct nfp_net *nn; unsigned int saidx; - nn = netdev_priv(netdev); + nn = netdev_priv(dev); cfg = &msg.cfg_add_sa; /* 
General */ @@ -546,17 +546,16 @@ static int nfp_net_xfrm_add_state(struct xfrm_state *x, return 0; } -static void nfp_net_xfrm_del_state(struct xfrm_state *x) +static void nfp_net_xfrm_del_state(struct net_device *dev, struct xfrm_state *x) { struct nfp_ipsec_cfg_mssg msg = { .cmd = NFP_IPSEC_CFG_MSSG_INV_SA, .sa_idx = x->xso.offload_handle - 1, }; - struct net_device *netdev = x->xso.real_dev; struct nfp_net *nn; int err; - nn = netdev_priv(netdev); + nn = netdev_priv(dev); err = nfp_net_sched_mbox_amsg_work(nn, NFP_NET_CFG_MBOX_CMD_IPSEC, &msg, sizeof(msg), nfp_net_ipsec_cfg); if (err) diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/dp.c b/drivers/net/ethernet/netronome/nfp/nfd3/dp.c index f1c6c47564b1..08086eb76996 100644 --- a/drivers/net/ethernet/netronome/nfp/nfd3/dp.c +++ b/drivers/net/ethernet/netronome/nfp/nfd3/dp.c @@ -779,7 +779,7 @@ nfp_nfd3_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta, case NFP_NET_META_CSUM: meta->csum_type = CHECKSUM_COMPLETE; meta->csum = - (__force __wsum)__get_unaligned_cpu32(data); + (__force __wsum)get_unaligned((u32 *)data); data += 4; break; case NFP_NET_META_RESYNC_INFO: diff --git a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c index ebeb6ab4465c..ab3cd06ed63e 100644 --- a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c +++ b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c @@ -779,7 +779,7 @@ nfp_nfdk_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta, case NFP_NET_META_CSUM: meta->csum_type = CHECKSUM_COMPLETE; meta->csum = - (__force __wsum)__get_unaligned_cpu32(data); + (__force __wsum)get_unaligned((u32 *)data); data += 4; break; case NFP_NET_META_RESYNC_INFO: diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 95514fabadf2..28997ddab966 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -2538,7 +2538,7 @@ nfp_net_alloc(struct pci_dev *pdev, const struct nfp_dev_info *dev_info, nn->dp.num_r_vecs, num_online_cpus()); nn->max_r_vecs = nn->dp.num_r_vecs; - nn->dp.xsk_pools = kcalloc(nn->max_r_vecs, sizeof(nn->dp.xsk_pools), + nn->dp.xsk_pools = kcalloc(nn->max_r_vecs, sizeof(*nn->dp.xsk_pools), GFP_KERNEL); if (!nn->dp.xsk_pools) { err = -ENOMEM; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c index a2d4336d2766..92f30ff2d631 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c @@ -948,64 +948,20 @@ static int ionic_get_tunable(struct net_device *netdev, return 0; } -static int ionic_get_module_info(struct net_device *netdev, - struct ethtool_modinfo *modinfo) - -{ - struct ionic_lif *lif = netdev_priv(netdev); - struct ionic_dev *idev = &lif->ionic->idev; - struct ionic_xcvr_status *xcvr; - struct sfp_eeprom_base *sfp; - - xcvr = &idev->port_info->status.xcvr; - sfp = (struct sfp_eeprom_base *) xcvr->sprom; - - /* report the module data type and length */ - switch (sfp->phys_id) { - case SFF8024_ID_SFP: - modinfo->type = ETH_MODULE_SFF_8079; - modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; - break; - case SFF8024_ID_QSFP_8436_8636: - case SFF8024_ID_QSFP28_8636: - case SFF8024_ID_QSFP_PLUS_CMIS: - modinfo->type = ETH_MODULE_SFF_8436; - modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; - break; - default: - netdev_info(netdev, "unknown xcvr type 0x%02x\n", - xcvr->sprom[0]); - 
modinfo->type = 0; - modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; - break; - } - - return 0; -} - -static int ionic_get_module_eeprom(struct net_device *netdev, - struct ethtool_eeprom *ee, - u8 *data) +static int ionic_do_module_copy(u8 *dst, u8 *src, u32 len) { - struct ionic_lif *lif = netdev_priv(netdev); - struct ionic_dev *idev = &lif->ionic->idev; - struct ionic_xcvr_status *xcvr; - char tbuf[sizeof(xcvr->sprom)]; + char tbuf[sizeof_field(struct ionic_xcvr_status, sprom)]; int count = 10; - u32 len; /* The NIC keeps the module prom up-to-date in the DMA space * so we can simply copy the module bytes into the data buffer. */ - xcvr = &idev->port_info->status.xcvr; - len = min_t(u32, sizeof(xcvr->sprom), ee->len); - do { - memcpy(data, &xcvr->sprom[ee->offset], len); - memcpy(tbuf, &xcvr->sprom[ee->offset], len); + memcpy(dst, src, len); + memcpy(tbuf, src, len); /* Let's make sure we got a consistent copy */ - if (!memcmp(data, tbuf, len)) + if (!memcmp(dst, tbuf, len)) break; } while (--count); @@ -1016,6 +972,48 @@ static int ionic_get_module_eeprom(struct net_device *netdev, return 0; } +static int ionic_get_module_eeprom_by_page(struct net_device *netdev, + const struct ethtool_module_eeprom *page_data, + struct netlink_ext_ack *extack) +{ + struct ionic_lif *lif = netdev_priv(netdev); + struct ionic_dev *idev = &lif->ionic->idev; + u32 err = -EINVAL; + u8 *src; + + if (!page_data->length) + return -EINVAL; + + if (page_data->bank != 0) { + NL_SET_ERR_MSG_MOD(extack, "Only bank 0 is supported"); + return -EINVAL; + } + + switch (page_data->page) { + case 0: + src = &idev->port_info->status.xcvr.sprom[page_data->offset]; + break; + case 1: + src = &idev->port_info->sprom_page1[page_data->offset - 128]; + break; + case 2: + src = &idev->port_info->sprom_page2[page_data->offset - 128]; + break; + case 17: + src = &idev->port_info->sprom_page17[page_data->offset - 128]; + break; + default: + return -EOPNOTSUPP; + } + + memset(page_data->data, 0, page_data->length); + err = ionic_do_module_copy(page_data->data, src, page_data->length); + if (err) + return err; + + return page_data->length; +} + static int ionic_get_ts_info(struct net_device *netdev, struct kernel_ethtool_ts_info *info) { @@ -1161,8 +1159,7 @@ static const struct ethtool_ops ionic_ethtool_ops = { .set_rxfh = ionic_set_rxfh, .get_tunable = ionic_get_tunable, .set_tunable = ionic_set_tunable, - .get_module_info = ionic_get_module_info, - .get_module_eeprom = ionic_get_module_eeprom, + .get_module_eeprom_by_page = ionic_get_module_eeprom_by_page, .get_pauseparam = ionic_get_pauseparam, .set_pauseparam = ionic_set_pauseparam, .get_fecparam = ionic_get_fecparam, diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h index 830c8adbfbee..f1ddbe9994a3 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_if.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h @@ -2839,6 +2839,10 @@ union ionic_port_identity { * @status: Port status data * @stats: Port statistics data * @mgmt_stats: Port management statistics data + * @sprom_epage: Extended Transceiver sprom + * @sprom_page1: Extended Transceiver sprom, page 1 + * @sprom_page2: Extended Transceiver sprom, page 2 + * @sprom_page17: Extended Transceiver sprom, page 17 * @rsvd: reserved byte(s) * @pb_stats: uplink pb drop stats */ @@ -2849,8 +2853,17 @@ struct ionic_port_info { struct ionic_port_stats stats; struct ionic_mgmt_port_stats mgmt_stats; }; - /* room for pb_stats to start at 2k offset */ - u8 rsvd[760]; + union 
{ + u8 sprom_epage[384]; + struct { + u8 sprom_page1[128]; + u8 sprom_page2[128]; + u8 sprom_page17[128]; + }; + }; + u8 rsvd[376]; + + /* pb_stats must start at 2k offset */ struct ionic_port_pb_stats pb_stats; }; diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index b7def3b54937..016b575861b9 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -939,7 +939,6 @@ u16 qed_get_cm_pq_idx_ofld_mtc(struct qed_hwfn *p_hwfn, u8 tc); u16 qed_get_cm_pq_idx_llt_mtc(struct qed_hwfn *p_hwfn, u8 tc); /* doorbell recovery mechanism */ -void qed_db_recovery_dp(struct qed_hwfn *p_hwfn); void qed_db_recovery_execute(struct qed_hwfn *p_hwfn); bool qed_edpm_enabled(struct qed_hwfn *p_hwfn); diff --git a/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h index f6cd1b3efdfd..27e91d0d39f8 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h @@ -1305,37 +1305,6 @@ enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn, char *results_buf); /** - * qed_print_mcp_trace_results_cont(): Prints MCP Trace results, and - * keeps the MCP trace meta data allocated, to support continuous MCP Trace - * parsing. After the continuous parsing ends, mcp_trace_free_meta_data should - * be called to free the meta data. - * - * @p_hwfn: HW device data. - * @dump_buf: MVP trace dump buffer, starting from the header. - * @results_buf: Buffer for printing the mcp trace results. - * - * Return: Error if the parsing fails, ok otherwise. - */ -enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - char *results_buf); - -/** - * qed_print_mcp_trace_line(): Prints MCP Trace results for a single line - * - * @p_hwfn: HW device data. - * @dump_buf: MCP trace dump buffer, starting from the header. - * @num_dumped_bytes: Number of bytes that were dumped. - * @results_buf: Buffer for printing the mcp trace results. - * - * Return: Error if the parsing fails, ok otherwise. - */ -enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn, - u8 *dump_buf, - u32 num_dumped_bytes, - char *results_buf); - -/** * qed_mcp_trace_free_meta_data(): Frees the MCP Trace meta data. * Should be called after continuous MCP Trace parsing. 
* diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c index 464a72afb758..9c3d3dd2f847 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.c +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c @@ -7614,31 +7614,6 @@ enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn, results_buf, &parsed_buf_size, true); } -enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - char *results_buf) -{ - u32 parsed_buf_size; - - return qed_parse_mcp_trace_dump(p_hwfn, dump_buf, results_buf, - &parsed_buf_size, false); -} - -enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn, - u8 *dump_buf, - u32 num_dumped_bytes, - char *results_buf) -{ - u32 parsed_results_bytes; - - return qed_parse_mcp_trace_buf(p_hwfn, - dump_buf, - num_dumped_bytes, - 0, - num_dumped_bytes, - results_buf, &parsed_results_bytes); -} - /* Frees the specified MCP Trace meta data */ void qed_mcp_trace_free_meta_data(struct qed_hwfn *p_hwfn) { diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 86a93cac2647..9659ce5b0712 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -255,25 +255,6 @@ static void qed_db_recovery_teardown(struct qed_hwfn *p_hwfn) p_hwfn->db_recovery_info.db_recovery_counter = 0; } -/* Print the content of the doorbell recovery mechanism */ -void qed_db_recovery_dp(struct qed_hwfn *p_hwfn) -{ - struct qed_db_recovery_entry *db_entry = NULL; - - DP_NOTICE(p_hwfn, - "Displaying doorbell recovery database. Counter was %d\n", - p_hwfn->db_recovery_info.db_recovery_counter); - - /* Protect the list */ - spin_lock_bh(&p_hwfn->db_recovery_info.lock); - list_for_each_entry(db_entry, - &p_hwfn->db_recovery_info.list, list_entry) { - qed_db_recovery_dp_entry(p_hwfn, db_entry, "Printing"); - } - - spin_unlock_bh(&p_hwfn->db_recovery_info.lock); -} - /* Ring the doorbell of a single doorbell recovery entry */ static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn, struct qed_db_recovery_entry *db_entry) diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index ed1a84542ad2..10e355397cee 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -2666,58 +2666,6 @@ void qed_gft_config(struct qed_hwfn *p_hwfn, void qed_enable_context_validation(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); -/** - * qed_calc_session_ctx_validation(): Calcualte validation byte for - * session context. - * - * @p_ctx_mem: Pointer to context memory. - * @ctx_size: Context size. - * @ctx_type: Context type. - * @cid: Context cid. - * - * Return: Void. - */ -void qed_calc_session_ctx_validation(void *p_ctx_mem, - u16 ctx_size, u8 ctx_type, u32 cid); - -/** - * qed_calc_task_ctx_validation(): Calcualte validation byte for task - * context. - * - * @p_ctx_mem: Pointer to context memory. - * @ctx_size: Context size. - * @ctx_type: Context type. - * @tid: Context tid. - * - * Return: Void. - */ -void qed_calc_task_ctx_validation(void *p_ctx_mem, - u16 ctx_size, u8 ctx_type, u32 tid); - -/** - * qed_memset_session_ctx(): Memset session context to 0 while - * preserving validation bytes. - * - * @p_ctx_mem: Pointer to context memory. - * @ctx_size: Size to initialzie. - * @ctx_type: Context type. - * - * Return: Void. 
- */ -void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type); - -/** - * qed_memset_task_ctx(): Memset task context to 0 while preserving - * validation bytes. - * - * @p_ctx_mem: Pointer to context memory. - * @ctx_size: size to initialzie. - * @ctx_type: context type. - * - * Return: Void. - */ -void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type); - #define NUM_STORMS 6 /** diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c index 9e5f0dbc8a07..9907973399dc 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.c +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c @@ -69,17 +69,6 @@ int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn) return 0; } -void qed_ptt_invalidate(struct qed_hwfn *p_hwfn) -{ - struct qed_ptt *p_ptt; - int i; - - for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) { - p_ptt = &p_hwfn->p_ptt_pool->ptts[i]; - p_ptt->pxp.offset = QED_BAR_INVALID_OFFSET; - } -} - void qed_ptt_pool_free(struct qed_hwfn *p_hwfn) { kfree(p_hwfn->p_ptt_pool); diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h index e535983ce21b..3c98f58a184f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h @@ -62,15 +62,6 @@ enum _dmae_cmd_crc_mask { void qed_gtt_init(struct qed_hwfn *p_hwfn); /** - * qed_ptt_invalidate(): Forces all ptt entries to be re-configured - * - * @p_hwfn: HW device data. - * - * Return: Void. - */ -void qed_ptt_invalidate(struct qed_hwfn *p_hwfn); - -/** * qed_ptt_pool_alloc(): Allocate and initialize PTT pool. * * @p_hwfn: HW device data. diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c index 407029a36fa1..aa20bb8caa9a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c @@ -18,16 +18,6 @@ #define CDU_VALIDATION_DEFAULT_CFG CDU_CONTEXT_VALIDATION_DEFAULT_CFG -static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES] = { - {400, 336, 352, 368, 304, 384, 416, 352}, /* region 3 offsets */ - {528, 496, 416, 512, 448, 512, 544, 480}, /* region 4 offsets */ - {608, 544, 496, 576, 576, 592, 624, 560} /* region 5 offsets */ -}; - -static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES] = { - {240, 240, 112, 0, 0, 0, 0, 96} /* region 1 offsets */ -}; - /* General constants */ #define QM_PQ_MEM_4KB(pq_size) (pq_size ? 
DIV_ROUND_UP((pq_size + 1) * \ QM_PQ_ELEMENT_SIZE, \ @@ -1576,134 +1566,6 @@ void qed_gft_config(struct qed_hwfn *p_hwfn, qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1); } -DECLARE_CRC8_TABLE(cdu_crc8_table); - -/* Calculate and return CDU validation byte per connection type/region/cid */ -static u8 qed_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid) -{ - const u8 validation_cfg = CDU_VALIDATION_DEFAULT_CFG; - u8 crc, validation_byte = 0; - static u8 crc8_table_valid; /* automatically initialized to 0 */ - u32 validation_string = 0; - __be32 data_to_crc; - - if (!crc8_table_valid) { - crc8_populate_msb(cdu_crc8_table, 0x07); - crc8_table_valid = 1; - } - - /* The CRC is calculated on the String-to-compress: - * [31:8] = {CID[31:20],CID[11:0]} - * [7:4] = Region - * [3:0] = Type - */ - if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_CID) & 1) - validation_string |= (cid & 0xFFF00000) | ((cid & 0xFFF) << 8); - - if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_REGION) & 1) - validation_string |= ((region & 0xF) << 4); - - if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_TYPE) & 1) - validation_string |= (conn_type & 0xF); - - /* Convert to big-endian and calculate CRC8 */ - data_to_crc = cpu_to_be32(validation_string); - crc = crc8(cdu_crc8_table, (u8 *)&data_to_crc, sizeof(data_to_crc), - CRC8_INIT_VALUE); - - /* The validation byte [7:0] is composed: - * for type A validation - * [7] = active configuration bit - * [6:0] = crc[6:0] - * - * for type B validation - * [7] = active configuration bit - * [6:3] = connection_type[3:0] - * [2:0] = crc[2:0] - */ - validation_byte |= - ((validation_cfg >> - CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE) & 1) << 7; - - if ((validation_cfg >> - CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT) & 1) - validation_byte |= ((conn_type & 0xF) << 3) | (crc & 0x7); - else - validation_byte |= crc & 0x7F; - - return validation_byte; -} - -/* Calcualte and set validation bytes for session context */ -void qed_calc_session_ctx_validation(void *p_ctx_mem, - u16 ctx_size, u8 ctx_type, u32 cid) -{ - u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx; - - p_ctx = (u8 * const)p_ctx_mem; - x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]]; - t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]]; - u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]]; - - memset(p_ctx, 0, ctx_size); - - *x_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 3, cid); - *t_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 4, cid); - *u_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 5, cid); -} - -/* Calcualte and set validation bytes for task context */ -void qed_calc_task_ctx_validation(void *p_ctx_mem, - u16 ctx_size, u8 ctx_type, u32 tid) -{ - u8 *p_ctx, *region1_val_ptr; - - p_ctx = (u8 * const)p_ctx_mem; - region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]]; - - memset(p_ctx, 0, ctx_size); - - *region1_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 1, tid); -} - -/* Memset session context to 0 while preserving validation bytes */ -void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type) -{ - u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx; - u8 x_val, t_val, u_val; - - p_ctx = (u8 * const)p_ctx_mem; - x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]]; - t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]]; - u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]]; - - x_val = *x_val_ptr; - t_val = *t_val_ptr; - u_val = *u_val_ptr; - - memset(p_ctx, 0, ctx_size); - - *x_val_ptr = x_val; - *t_val_ptr = t_val; - *u_val_ptr = u_val; -} - -/* Memset task 
context to 0 while preserving validation bytes */ -void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type) -{ - u8 *p_ctx, *region1_val_ptr; - u8 region1_val; - - p_ctx = (u8 * const)p_ctx_mem; - region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]]; - - region1_val = *region1_val_ptr; - - memset(p_ctx, 0, ctx_size); - - *region1_val_ptr = region1_val; -} - /* Enable and configure context validation */ void qed_enable_context_validation(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) diff --git a/drivers/net/ethernet/qualcomm/Kconfig b/drivers/net/ethernet/qualcomm/Kconfig index 9210ff360fdc..a4434eb38950 100644 --- a/drivers/net/ethernet/qualcomm/Kconfig +++ b/drivers/net/ethernet/qualcomm/Kconfig @@ -52,7 +52,6 @@ config QCOM_EMAC depends on HAS_DMA && HAS_IOMEM select CRC32 select PHYLIB - select MDIO_DEVRES help This driver supports the Qualcomm Technologies, Inc. Gigabit Ethernet Media Access Controller (EMAC). The controller diff --git a/drivers/net/ethernet/realtek/r8169.h b/drivers/net/ethernet/realtek/r8169.h index 7a194a8ab989..2c1a0c21af8d 100644 --- a/drivers/net/ethernet/realtek/r8169.h +++ b/drivers/net/ethernet/realtek/r8169.h @@ -64,16 +64,15 @@ enum mac_version { /* support for RTL_GIGA_MAC_VER_50 has been removed */ RTL_GIGA_MAC_VER_51, RTL_GIGA_MAC_VER_52, - RTL_GIGA_MAC_VER_53, /* support for RTL_GIGA_MAC_VER_60 has been removed */ RTL_GIGA_MAC_VER_61, RTL_GIGA_MAC_VER_63, RTL_GIGA_MAC_VER_64, - RTL_GIGA_MAC_VER_65, RTL_GIGA_MAC_VER_66, RTL_GIGA_MAC_VER_70, - RTL_GIGA_MAC_VER_71, - RTL_GIGA_MAC_NONE + RTL_GIGA_MAC_VER_80, + RTL_GIGA_MAC_NONE, + RTL_GIGA_MAC_VER_LAST = RTL_GIGA_MAC_NONE - 1 }; struct rtl8169_private; diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 4eebd9cb40a3..43170500d566 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -60,6 +60,7 @@ #define FIRMWARE_8125BP_2 "rtl_nic/rtl8125bp-2.fw" #define FIRMWARE_8126A_2 "rtl_nic/rtl8126a-2.fw" #define FIRMWARE_8126A_3 "rtl_nic/rtl8126a-3.fw" +#define FIRMWARE_8127A_1 "rtl_nic/rtl8127a-1.fw" #define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */ #define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */ @@ -91,61 +92,117 @@ #define JUMBO_9K (9 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN) #define JUMBO_16K (SZ_16K - VLAN_ETH_HLEN - ETH_FCS_LEN) -static const struct { +static const struct rtl_chip_info { + u16 mask; + u16 val; + enum mac_version mac_version; const char *name; const char *fw_name; } rtl_chip_infos[] = { - /* PCI devices. */ - [RTL_GIGA_MAC_VER_02] = {"RTL8169s" }, - [RTL_GIGA_MAC_VER_03] = {"RTL8110s" }, - [RTL_GIGA_MAC_VER_04] = {"RTL8169sb/8110sb" }, - [RTL_GIGA_MAC_VER_05] = {"RTL8169sc/8110sc" }, - [RTL_GIGA_MAC_VER_06] = {"RTL8169sc/8110sc" }, - /* PCI-E devices. 
*/ - [RTL_GIGA_MAC_VER_07] = {"RTL8102e" }, - [RTL_GIGA_MAC_VER_08] = {"RTL8102e" }, - [RTL_GIGA_MAC_VER_09] = {"RTL8102e/RTL8103e" }, - [RTL_GIGA_MAC_VER_10] = {"RTL8101e/RTL8100e" }, - [RTL_GIGA_MAC_VER_14] = {"RTL8401" }, - [RTL_GIGA_MAC_VER_17] = {"RTL8168b/8111b" }, - [RTL_GIGA_MAC_VER_18] = {"RTL8168cp/8111cp" }, - [RTL_GIGA_MAC_VER_19] = {"RTL8168c/8111c" }, - [RTL_GIGA_MAC_VER_20] = {"RTL8168c/8111c" }, - [RTL_GIGA_MAC_VER_21] = {"RTL8168c/8111c" }, - [RTL_GIGA_MAC_VER_22] = {"RTL8168c/8111c" }, - [RTL_GIGA_MAC_VER_23] = {"RTL8168cp/8111cp" }, - [RTL_GIGA_MAC_VER_24] = {"RTL8168cp/8111cp" }, - [RTL_GIGA_MAC_VER_25] = {"RTL8168d/8111d", FIRMWARE_8168D_1}, - [RTL_GIGA_MAC_VER_26] = {"RTL8168d/8111d", FIRMWARE_8168D_2}, - [RTL_GIGA_MAC_VER_28] = {"RTL8168dp/8111dp" }, - [RTL_GIGA_MAC_VER_29] = {"RTL8105e", FIRMWARE_8105E_1}, - [RTL_GIGA_MAC_VER_30] = {"RTL8105e", FIRMWARE_8105E_1}, - [RTL_GIGA_MAC_VER_31] = {"RTL8168dp/8111dp" }, - [RTL_GIGA_MAC_VER_32] = {"RTL8168e/8111e", FIRMWARE_8168E_1}, - [RTL_GIGA_MAC_VER_33] = {"RTL8168e/8111e", FIRMWARE_8168E_2}, - [RTL_GIGA_MAC_VER_34] = {"RTL8168evl/8111evl", FIRMWARE_8168E_3}, - [RTL_GIGA_MAC_VER_35] = {"RTL8168f/8111f", FIRMWARE_8168F_1}, - [RTL_GIGA_MAC_VER_36] = {"RTL8168f/8111f", FIRMWARE_8168F_2}, - [RTL_GIGA_MAC_VER_37] = {"RTL8402", FIRMWARE_8402_1 }, - [RTL_GIGA_MAC_VER_38] = {"RTL8411", FIRMWARE_8411_1 }, - [RTL_GIGA_MAC_VER_39] = {"RTL8106e", FIRMWARE_8106E_1}, - [RTL_GIGA_MAC_VER_40] = {"RTL8168g/8111g", FIRMWARE_8168G_2}, - [RTL_GIGA_MAC_VER_42] = {"RTL8168gu/8111gu", FIRMWARE_8168G_3}, - [RTL_GIGA_MAC_VER_43] = {"RTL8106eus", FIRMWARE_8106E_2}, - [RTL_GIGA_MAC_VER_44] = {"RTL8411b", FIRMWARE_8411_2 }, - [RTL_GIGA_MAC_VER_46] = {"RTL8168h/8111h", FIRMWARE_8168H_2}, - [RTL_GIGA_MAC_VER_48] = {"RTL8107e", FIRMWARE_8107E_2}, - [RTL_GIGA_MAC_VER_51] = {"RTL8168ep/8111ep" }, - [RTL_GIGA_MAC_VER_52] = {"RTL8168fp/RTL8117", FIRMWARE_8168FP_3}, - [RTL_GIGA_MAC_VER_53] = {"RTL8168fp/RTL8117", }, - [RTL_GIGA_MAC_VER_61] = {"RTL8125A", FIRMWARE_8125A_3}, - /* reserve 62 for CFG_METHOD_4 in the vendor driver */ - [RTL_GIGA_MAC_VER_63] = {"RTL8125B", FIRMWARE_8125B_2}, - [RTL_GIGA_MAC_VER_64] = {"RTL8125D", FIRMWARE_8125D_1}, - [RTL_GIGA_MAC_VER_65] = {"RTL8125D", FIRMWARE_8125D_2}, - [RTL_GIGA_MAC_VER_66] = {"RTL8125BP", FIRMWARE_8125BP_2}, - [RTL_GIGA_MAC_VER_70] = {"RTL8126A", FIRMWARE_8126A_2}, - [RTL_GIGA_MAC_VER_71] = {"RTL8126A", FIRMWARE_8126A_3}, + /* 8127A family. */ + { 0x7cf, 0x6c9, RTL_GIGA_MAC_VER_80, "RTL8127A", FIRMWARE_8127A_1 }, + + /* 8126A family. */ + { 0x7cf, 0x64a, RTL_GIGA_MAC_VER_70, "RTL8126A", FIRMWARE_8126A_3 }, + { 0x7cf, 0x649, RTL_GIGA_MAC_VER_70, "RTL8126A", FIRMWARE_8126A_2 }, + + /* 8125BP family. */ + { 0x7cf, 0x681, RTL_GIGA_MAC_VER_66, "RTL8125BP", FIRMWARE_8125BP_2 }, + + /* 8125D family. */ + { 0x7cf, 0x689, RTL_GIGA_MAC_VER_64, "RTL8125D", FIRMWARE_8125D_2 }, + { 0x7cf, 0x688, RTL_GIGA_MAC_VER_64, "RTL8125D", FIRMWARE_8125D_1 }, + + /* 8125B family. */ + { 0x7cf, 0x641, RTL_GIGA_MAC_VER_63, "RTL8125B", FIRMWARE_8125B_2 }, + + /* 8125A family. */ + { 0x7cf, 0x609, RTL_GIGA_MAC_VER_61, "RTL8125A", FIRMWARE_8125A_3 }, + + /* RTL8117 */ + { 0x7cf, 0x54b, RTL_GIGA_MAC_VER_52, "RTL8168fp/RTL8117" }, + { 0x7cf, 0x54a, RTL_GIGA_MAC_VER_52, "RTL8168fp/RTL8117", + FIRMWARE_8168FP_3 }, + + /* 8168EP family. */ + { 0x7cf, 0x502, RTL_GIGA_MAC_VER_51, "RTL8168ep/8111ep" }, + + /* 8168H family. 
*/ + { 0x7cf, 0x541, RTL_GIGA_MAC_VER_46, "RTL8168h/8111h", + FIRMWARE_8168H_2 }, + /* Realtek calls it RTL8168M, but it's handled like RTL8168H */ + { 0x7cf, 0x6c0, RTL_GIGA_MAC_VER_46, "RTL8168M", FIRMWARE_8168H_2 }, + + /* 8168G family. */ + { 0x7cf, 0x5c8, RTL_GIGA_MAC_VER_44, "RTL8411b", FIRMWARE_8411_2 }, + { 0x7cf, 0x509, RTL_GIGA_MAC_VER_42, "RTL8168gu/8111gu", + FIRMWARE_8168G_3 }, + { 0x7cf, 0x4c0, RTL_GIGA_MAC_VER_40, "RTL8168g/8111g", + FIRMWARE_8168G_2 }, + + /* 8168F family. */ + { 0x7c8, 0x488, RTL_GIGA_MAC_VER_38, "RTL8411", FIRMWARE_8411_1 }, + { 0x7cf, 0x481, RTL_GIGA_MAC_VER_36, "RTL8168f/8111f", + FIRMWARE_8168F_2 }, + { 0x7cf, 0x480, RTL_GIGA_MAC_VER_35, "RTL8168f/8111f", + FIRMWARE_8168F_1 }, + + /* 8168E family. */ + { 0x7c8, 0x2c8, RTL_GIGA_MAC_VER_34, "RTL8168evl/8111evl", + FIRMWARE_8168E_3 }, + { 0x7cf, 0x2c1, RTL_GIGA_MAC_VER_32, "RTL8168e/8111e", + FIRMWARE_8168E_1 }, + { 0x7c8, 0x2c0, RTL_GIGA_MAC_VER_33, "RTL8168e/8111e", + FIRMWARE_8168E_2 }, + + /* 8168D family. */ + { 0x7cf, 0x281, RTL_GIGA_MAC_VER_25, "RTL8168d/8111d", + FIRMWARE_8168D_1 }, + { 0x7c8, 0x280, RTL_GIGA_MAC_VER_26, "RTL8168d/8111d", + FIRMWARE_8168D_2 }, + + /* 8168DP family. */ + { 0x7cf, 0x28a, RTL_GIGA_MAC_VER_28, "RTL8168dp/8111dp" }, + { 0x7cf, 0x28b, RTL_GIGA_MAC_VER_31, "RTL8168dp/8111dp" }, + + /* 8168C family. */ + { 0x7cf, 0x3c9, RTL_GIGA_MAC_VER_23, "RTL8168cp/8111cp" }, + { 0x7cf, 0x3c8, RTL_GIGA_MAC_VER_18, "RTL8168cp/8111cp" }, + { 0x7c8, 0x3c8, RTL_GIGA_MAC_VER_24, "RTL8168cp/8111cp" }, + { 0x7cf, 0x3c0, RTL_GIGA_MAC_VER_19, "RTL8168c/8111c" }, + { 0x7cf, 0x3c2, RTL_GIGA_MAC_VER_20, "RTL8168c/8111c" }, + { 0x7cf, 0x3c3, RTL_GIGA_MAC_VER_21, "RTL8168c/8111c" }, + { 0x7c8, 0x3c0, RTL_GIGA_MAC_VER_22, "RTL8168c/8111c" }, + + /* 8168B family. */ + { 0x7c8, 0x380, RTL_GIGA_MAC_VER_17, "RTL8168b/8111b" }, + /* This one is very old and rare, support has been removed. + * { 0x7c8, 0x300, RTL_GIGA_MAC_VER_11, "RTL8168b/8111b" }, + */ + + /* 8101 family. */ + { 0x7c8, 0x448, RTL_GIGA_MAC_VER_39, "RTL8106e", FIRMWARE_8106E_1 }, + { 0x7c8, 0x440, RTL_GIGA_MAC_VER_37, "RTL8402", FIRMWARE_8402_1 }, + { 0x7cf, 0x409, RTL_GIGA_MAC_VER_29, "RTL8105e", FIRMWARE_8105E_1 }, + { 0x7c8, 0x408, RTL_GIGA_MAC_VER_30, "RTL8105e", FIRMWARE_8105E_1 }, + { 0x7cf, 0x349, RTL_GIGA_MAC_VER_08, "RTL8102e" }, + { 0x7cf, 0x249, RTL_GIGA_MAC_VER_08, "RTL8102e" }, + { 0x7cf, 0x348, RTL_GIGA_MAC_VER_07, "RTL8102e" }, + { 0x7cf, 0x248, RTL_GIGA_MAC_VER_07, "RTL8102e" }, + { 0x7cf, 0x240, RTL_GIGA_MAC_VER_14, "RTL8401" }, + { 0x7c8, 0x348, RTL_GIGA_MAC_VER_09, "RTL8102e/RTL8103e" }, + { 0x7c8, 0x248, RTL_GIGA_MAC_VER_09, "RTL8102e/RTL8103e" }, + { 0x7c8, 0x340, RTL_GIGA_MAC_VER_10, "RTL8101e/RTL8100e" }, + + /* 8110 family. 
*/ + { 0xfc8, 0x980, RTL_GIGA_MAC_VER_06, "RTL8169sc/8110sc" }, + { 0xfc8, 0x180, RTL_GIGA_MAC_VER_05, "RTL8169sc/8110sc" }, + { 0xfc8, 0x100, RTL_GIGA_MAC_VER_04, "RTL8169sb/8110sb" }, + { 0xfc8, 0x040, RTL_GIGA_MAC_VER_03, "RTL8110s" }, + { 0xfc8, 0x008, RTL_GIGA_MAC_VER_02, "RTL8169s" }, + + /* Catch-all */ + { 0x000, 0x000, RTL_GIGA_MAC_NONE } }; static const struct pci_device_id rtl8169_pci_tbl[] = { @@ -169,8 +226,10 @@ static const struct pci_device_id rtl8169_pci_tbl[] = { { 0x0001, 0x8168, PCI_ANY_ID, 0x2410 }, { PCI_VDEVICE(REALTEK, 0x8125) }, { PCI_VDEVICE(REALTEK, 0x8126) }, + { PCI_VDEVICE(REALTEK, 0x8127) }, { PCI_VDEVICE(REALTEK, 0x3000) }, { PCI_VDEVICE(REALTEK, 0x5000) }, + { PCI_VDEVICE(REALTEK, 0x0e10) }, {} }; @@ -716,6 +775,7 @@ MODULE_FIRMWARE(FIRMWARE_8125D_2); MODULE_FIRMWARE(FIRMWARE_8125BP_2); MODULE_FIRMWARE(FIRMWARE_8126A_2); MODULE_FIRMWARE(FIRMWARE_8126A_3); +MODULE_FIRMWARE(FIRMWARE_8127A_1); static inline struct device *tp_to_dev(struct rtl8169_private *tp) { @@ -777,7 +837,7 @@ static bool rtl_is_8168evl_up(struct rtl8169_private *tp) { return tp->mac_version >= RTL_GIGA_MAC_VER_34 && tp->mac_version != RTL_GIGA_MAC_VER_39 && - tp->mac_version <= RTL_GIGA_MAC_VER_53; + tp->mac_version <= RTL_GIGA_MAC_VER_52; } static bool rtl_supports_eee(struct rtl8169_private *tp) @@ -945,9 +1005,7 @@ void r8169_get_led_name(struct rtl8169_private *tp, int idx, static void r8168fp_adjust_ocp_cmd(struct rtl8169_private *tp, u32 *cmd, int type) { /* based on RTL8168FP_OOBMAC_BASE in vendor driver */ - if (type == ERIAR_OOB && - (tp->mac_version == RTL_GIGA_MAC_VER_52 || - tp->mac_version == RTL_GIGA_MAC_VER_53)) + if (type == ERIAR_OOB && tp->mac_version == RTL_GIGA_MAC_VER_52) *cmd |= 0xf70 << 18; } @@ -1236,7 +1294,7 @@ static void rtl_writephy(struct rtl8169_private *tp, int location, int val) case RTL_GIGA_MAC_VER_31: r8168dp_2_mdio_write(tp, location, val); break; - case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_71: + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_LAST: r8168g_mdio_write(tp, location, val); break; default: @@ -1251,7 +1309,7 @@ static int rtl_readphy(struct rtl8169_private *tp, int location) case RTL_GIGA_MAC_VER_28: case RTL_GIGA_MAC_VER_31: return r8168dp_2_mdio_read(tp, location); - case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_71: + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_LAST: return r8168g_mdio_read(tp, location); default: return r8169_mdio_read(tp, location); @@ -1447,7 +1505,7 @@ static enum rtl_dash_type rtl_get_dash_type(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_28: case RTL_GIGA_MAC_VER_31: return RTL_DASH_DP; - case RTL_GIGA_MAC_VER_51 ... RTL_GIGA_MAC_VER_53: + case RTL_GIGA_MAC_VER_51 ... RTL_GIGA_MAC_VER_52: return RTL_DASH_EP; case RTL_GIGA_MAC_VER_66: return RTL_DASH_25_BP; @@ -1603,7 +1661,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) break; case RTL_GIGA_MAC_VER_34: case RTL_GIGA_MAC_VER_37: - case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_71: + case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_LAST: r8169_mod_reg8_cond(tp, Config2, PME_SIGNAL, wolopts); break; default: @@ -2076,7 +2134,7 @@ static void rtl_set_eee_txidle_timer(struct rtl8169_private *tp) tp->tx_lpi_timer = timer_val; r8168_mac_ocp_write(tp, 0xe048, timer_val); break; - case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_71: + case RTL_GIGA_MAC_VER_61 ... 
RTL_GIGA_MAC_VER_LAST: tp->tx_lpi_timer = timer_val; RTL_W16(tp, EEE_TXIDLE_TIMER_8125, timer_val); break; @@ -2265,151 +2323,30 @@ static const struct ethtool_ops rtl8169_ethtool_ops = { .get_eth_ctrl_stats = rtl8169_get_eth_ctrl_stats, }; -static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii) +static const struct rtl_chip_info *rtl8169_get_chip_version(u16 xid, bool gmii) { - /* - * The driver currently handles the 8168Bf and the 8168Be identically - * but they can be identified more specifically through the test below - * if needed: - * - * (RTL_R32(tp, TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be - * - * Same thing for the 8101Eb and the 8101Ec: - * - * (RTL_R32(tp, TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec - */ - static const struct rtl_mac_info { - u16 mask; - u16 val; - enum mac_version ver; - } mac_info[] = { - /* 8126A family. */ - { 0x7cf, 0x64a, RTL_GIGA_MAC_VER_71 }, - { 0x7cf, 0x649, RTL_GIGA_MAC_VER_70 }, - - /* 8125BP family. */ - { 0x7cf, 0x681, RTL_GIGA_MAC_VER_66 }, - - /* 8125D family. */ - { 0x7cf, 0x689, RTL_GIGA_MAC_VER_65 }, - { 0x7cf, 0x688, RTL_GIGA_MAC_VER_64 }, - - /* 8125B family. */ - { 0x7cf, 0x641, RTL_GIGA_MAC_VER_63 }, - - /* 8125A family. */ - { 0x7cf, 0x609, RTL_GIGA_MAC_VER_61 }, - /* It seems only XID 609 made it to the mass market. - * { 0x7cf, 0x608, RTL_GIGA_MAC_VER_60 }, - * { 0x7c8, 0x608, RTL_GIGA_MAC_VER_61 }, - */ - - /* RTL8117 */ - { 0x7cf, 0x54b, RTL_GIGA_MAC_VER_53 }, - { 0x7cf, 0x54a, RTL_GIGA_MAC_VER_52 }, - - /* 8168EP family. */ - { 0x7cf, 0x502, RTL_GIGA_MAC_VER_51 }, - /* It seems this chip version never made it to - * the wild. Let's disable detection. - * { 0x7cf, 0x501, RTL_GIGA_MAC_VER_50 }, - * { 0x7cf, 0x500, RTL_GIGA_MAC_VER_49 }, - */ - - /* 8168H family. */ - { 0x7cf, 0x541, RTL_GIGA_MAC_VER_46 }, - /* It seems this chip version never made it to - * the wild. Let's disable detection. - * { 0x7cf, 0x540, RTL_GIGA_MAC_VER_45 }, - */ - /* Realtek calls it RTL8168M, but it's handled like RTL8168H */ - { 0x7cf, 0x6c0, RTL_GIGA_MAC_VER_46 }, - - /* 8168G family. */ - { 0x7cf, 0x5c8, RTL_GIGA_MAC_VER_44 }, - { 0x7cf, 0x509, RTL_GIGA_MAC_VER_42 }, - /* It seems this chip version never made it to - * the wild. Let's disable detection. - * { 0x7cf, 0x4c1, RTL_GIGA_MAC_VER_41 }, - */ - { 0x7cf, 0x4c0, RTL_GIGA_MAC_VER_40 }, - - /* 8168F family. */ - { 0x7c8, 0x488, RTL_GIGA_MAC_VER_38 }, - { 0x7cf, 0x481, RTL_GIGA_MAC_VER_36 }, - { 0x7cf, 0x480, RTL_GIGA_MAC_VER_35 }, - - /* 8168E family. */ - { 0x7c8, 0x2c8, RTL_GIGA_MAC_VER_34 }, - { 0x7cf, 0x2c1, RTL_GIGA_MAC_VER_32 }, - { 0x7c8, 0x2c0, RTL_GIGA_MAC_VER_33 }, - - /* 8168D family. */ - { 0x7cf, 0x281, RTL_GIGA_MAC_VER_25 }, - { 0x7c8, 0x280, RTL_GIGA_MAC_VER_26 }, - - /* 8168DP family. */ - /* It seems this early RTL8168dp version never made it to - * the wild. Support has been removed. - * { 0x7cf, 0x288, RTL_GIGA_MAC_VER_27 }, - */ - { 0x7cf, 0x28a, RTL_GIGA_MAC_VER_28 }, - { 0x7cf, 0x28b, RTL_GIGA_MAC_VER_31 }, - - /* 8168C family. */ - { 0x7cf, 0x3c9, RTL_GIGA_MAC_VER_23 }, - { 0x7cf, 0x3c8, RTL_GIGA_MAC_VER_18 }, - { 0x7c8, 0x3c8, RTL_GIGA_MAC_VER_24 }, - { 0x7cf, 0x3c0, RTL_GIGA_MAC_VER_19 }, - { 0x7cf, 0x3c2, RTL_GIGA_MAC_VER_20 }, - { 0x7cf, 0x3c3, RTL_GIGA_MAC_VER_21 }, - { 0x7c8, 0x3c0, RTL_GIGA_MAC_VER_22 }, - - /* 8168B family. */ - { 0x7c8, 0x380, RTL_GIGA_MAC_VER_17 }, - /* This one is very old and rare, support has been removed. - * { 0x7c8, 0x300, RTL_GIGA_MAC_VER_11 }, - */ - - /* 8101 family. 
*/ - { 0x7c8, 0x448, RTL_GIGA_MAC_VER_39 }, - { 0x7c8, 0x440, RTL_GIGA_MAC_VER_37 }, - { 0x7cf, 0x409, RTL_GIGA_MAC_VER_29 }, - { 0x7c8, 0x408, RTL_GIGA_MAC_VER_30 }, - { 0x7cf, 0x349, RTL_GIGA_MAC_VER_08 }, - { 0x7cf, 0x249, RTL_GIGA_MAC_VER_08 }, - { 0x7cf, 0x348, RTL_GIGA_MAC_VER_07 }, - { 0x7cf, 0x248, RTL_GIGA_MAC_VER_07 }, - { 0x7cf, 0x240, RTL_GIGA_MAC_VER_14 }, - { 0x7c8, 0x348, RTL_GIGA_MAC_VER_09 }, - { 0x7c8, 0x248, RTL_GIGA_MAC_VER_09 }, - { 0x7c8, 0x340, RTL_GIGA_MAC_VER_10 }, - - /* 8110 family. */ - { 0xfc8, 0x980, RTL_GIGA_MAC_VER_06 }, - { 0xfc8, 0x180, RTL_GIGA_MAC_VER_05 }, - { 0xfc8, 0x100, RTL_GIGA_MAC_VER_04 }, - { 0xfc8, 0x040, RTL_GIGA_MAC_VER_03 }, - { 0xfc8, 0x008, RTL_GIGA_MAC_VER_02 }, - - /* Catch-all */ - { 0x000, 0x000, RTL_GIGA_MAC_NONE } + /* Chips combining a 1Gbps MAC with a 100Mbps PHY */ + static const struct rtl_chip_info rtl8106eus_info = { + .mac_version = RTL_GIGA_MAC_VER_43, + .name = "RTL8106eus", + .fw_name = FIRMWARE_8106E_2, }; - const struct rtl_mac_info *p = mac_info; - enum mac_version ver; + static const struct rtl_chip_info rtl8107e_info = { + .mac_version = RTL_GIGA_MAC_VER_48, + .name = "RTL8107e", + .fw_name = FIRMWARE_8107E_2, + }; + const struct rtl_chip_info *p = rtl_chip_infos; while ((xid & p->mask) != p->val) p++; - ver = p->ver; - if (ver != RTL_GIGA_MAC_NONE && !gmii) { - if (ver == RTL_GIGA_MAC_VER_42) - ver = RTL_GIGA_MAC_VER_43; - else if (ver == RTL_GIGA_MAC_VER_46) - ver = RTL_GIGA_MAC_VER_48; - } + if (p->mac_version == RTL_GIGA_MAC_VER_42 && !gmii) + return &rtl8106eus_info; + if (p->mac_version == RTL_GIGA_MAC_VER_46 && !gmii) + return &rtl8107e_info; - return ver; + return p; } static void rtl_release_firmware(struct rtl8169_private *tp) @@ -2553,13 +2490,13 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_38: RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST); break; - case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_53: + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_52: RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF); break; case RTL_GIGA_MAC_VER_61: RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST); break; - case RTL_GIGA_MAC_VER_63 ... RTL_GIGA_MAC_VER_71: + case RTL_GIGA_MAC_VER_63 ... RTL_GIGA_MAC_VER_LAST: RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST | RX_PAUSE_SLOT_ON); break; @@ -2684,14 +2621,14 @@ DECLARE_RTL_COND(rtl_rxtx_empty_cond_2) static void rtl_wait_txrx_fifo_empty(struct rtl8169_private *tp) { switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_53: + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_52: rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42); rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42); break; case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_61: rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42); break; - case RTL_GIGA_MAC_VER_63 ... RTL_GIGA_MAC_VER_71: + case RTL_GIGA_MAC_VER_63 ... 
RTL_GIGA_MAC_VER_LAST: RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq); rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42); rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond_2, 100, 42); @@ -2852,10 +2789,23 @@ static u32 rtl_csi_read(struct rtl8169_private *tp, int addr) RTL_R32(tp, CSIDR) : ~0; } +static void rtl_csi_mod(struct rtl8169_private *tp, int addr, + u32 mask, u32 set) +{ + u32 val; + + WARN(addr % 4, "Invalid CSI address %#x\n", addr); + + netdev_notice_once(tp->dev, + "No native access to PCI extended config space, falling back to CSI\n"); + + val = rtl_csi_read(tp, addr); + rtl_csi_write(tp, addr, (val & ~mask) | set); +} + static void rtl_disable_zrxdc_timeout(struct rtl8169_private *tp) { struct pci_dev *pdev = tp->pci_dev; - u32 csi; int rc; u8 val; @@ -2872,16 +2822,12 @@ static void rtl_disable_zrxdc_timeout(struct rtl8169_private *tp) } } - netdev_notice_once(tp->dev, - "No native access to PCI extended config space, falling back to CSI\n"); - csi = rtl_csi_read(tp, RTL_GEN3_RELATED_OFF); - rtl_csi_write(tp, RTL_GEN3_RELATED_OFF, csi & ~RTL_GEN3_ZRXDC_NONCOMPL); + rtl_csi_mod(tp, RTL_GEN3_RELATED_OFF, RTL_GEN3_ZRXDC_NONCOMPL, 0); } static void rtl_set_aspm_entry_latency(struct rtl8169_private *tp, u8 val) { struct pci_dev *pdev = tp->pci_dev; - u32 csi; /* According to Realtek the value at config space address 0x070f * controls the L0s/L1 entrance latency. We try standard ECAM access @@ -2893,10 +2839,7 @@ static void rtl_set_aspm_entry_latency(struct rtl8169_private *tp, u8 val) pci_write_config_byte(pdev, 0x070f, val) == PCIBIOS_SUCCESSFUL) return; - netdev_notice_once(tp->dev, - "No native access to PCI extended config space, falling back to CSI\n"); - csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff; - rtl_csi_write(tp, 0x070c, csi | val << 24); + rtl_csi_mod(tp, 0x070c, 0xff000000, val << 24); } static void rtl_set_def_aspm_entry_latency(struct rtl8169_private *tp) @@ -2960,7 +2903,7 @@ static void rtl_enable_exit_l1(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_38: rtl_eri_set_bits(tp, 0xd4, 0x0c00); break; - case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_71: + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_LAST: r8168_mac_ocp_modify(tp, 0xc0ac, 0, 0x1f80); break; default: @@ -2974,7 +2917,7 @@ static void rtl_disable_exit_l1(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38: rtl_eri_clear_bits(tp, 0xd4, 0x1f00); break; - case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_71: + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_LAST: r8168_mac_ocp_modify(tp, 0xc0ac, 0x1f80, 0); break; default: @@ -3001,7 +2944,7 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable) rtl_mod_config5(tp, 0, ASPM_en); switch (tp->mac_version) { case RTL_GIGA_MAC_VER_70: - case RTL_GIGA_MAC_VER_71: + case RTL_GIGA_MAC_VER_80: val8 = RTL_R8(tp, INT_CFG0_8125) | INT_CFG0_CLKREQEN; RTL_W8(tp, INT_CFG0_8125, val8); break; @@ -3012,7 +2955,7 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable) switch (tp->mac_version) { case RTL_GIGA_MAC_VER_46 ... RTL_GIGA_MAC_VER_48: - case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_71: + case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_LAST: /* reset ephy tx/rx disable timer */ r8168_mac_ocp_modify(tp, 0xe094, 0xff00, 0); /* chip can trigger L1.2 */ @@ -3024,7 +2967,7 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable) } else { switch (tp->mac_version) { case RTL_GIGA_MAC_VER_46 ... RTL_GIGA_MAC_VER_48: - case RTL_GIGA_MAC_VER_61 ... 
RTL_GIGA_MAC_VER_71: + case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_LAST: r8168_mac_ocp_modify(tp, 0xe092, 0x00ff, 0); break; default: @@ -3033,7 +2976,7 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable) switch (tp->mac_version) { case RTL_GIGA_MAC_VER_70: - case RTL_GIGA_MAC_VER_71: + case RTL_GIGA_MAC_VER_80: val8 = RTL_R8(tp, INT_CFG0_8125) & ~INT_CFG0_CLKREQEN; RTL_W8(tp, INT_CFG0_8125, val8); break; @@ -3754,11 +3697,12 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp) r8168_mac_ocp_modify(tp, 0xeb58, 0x0001, 0x0000); if (tp->mac_version == RTL_GIGA_MAC_VER_70 || - tp->mac_version == RTL_GIGA_MAC_VER_71) + tp->mac_version == RTL_GIGA_MAC_VER_80) RTL_W8(tp, 0xD8, RTL_R8(tp, 0xD8) & ~0x02); - if (tp->mac_version == RTL_GIGA_MAC_VER_70 || - tp->mac_version == RTL_GIGA_MAC_VER_71) + if (tp->mac_version == RTL_GIGA_MAC_VER_80) + r8168_mac_ocp_modify(tp, 0xe614, 0x0f00, 0x0f00); + else if (tp->mac_version == RTL_GIGA_MAC_VER_70) r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0400); else if (tp->mac_version == RTL_GIGA_MAC_VER_63) r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0200); @@ -3777,7 +3721,7 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp) r8168_mac_ocp_modify(tp, 0xe040, 0x1000, 0x0000); r8168_mac_ocp_modify(tp, 0xea1c, 0x0003, 0x0001); if (tp->mac_version == RTL_GIGA_MAC_VER_70 || - tp->mac_version == RTL_GIGA_MAC_VER_71) + tp->mac_version == RTL_GIGA_MAC_VER_80) r8168_mac_ocp_modify(tp, 0xea1c, 0x0300, 0x0000); else r8168_mac_ocp_modify(tp, 0xea1c, 0x0004, 0x0000); @@ -3855,6 +3799,12 @@ static void rtl_hw_start_8126a(struct rtl8169_private *tp) rtl_hw_start_8125_common(tp); } +static void rtl_hw_start_8127a(struct rtl8169_private *tp) +{ + rtl_set_def_aspm_entry_latency(tp); + rtl_hw_start_8125_common(tp); +} + static void rtl_hw_config(struct rtl8169_private *tp) { static const rtl_generic_fct hw_configs[] = { @@ -3893,14 +3843,12 @@ static void rtl_hw_config(struct rtl8169_private *tp) [RTL_GIGA_MAC_VER_48] = rtl_hw_start_8168h_1, [RTL_GIGA_MAC_VER_51] = rtl_hw_start_8168ep_3, [RTL_GIGA_MAC_VER_52] = rtl_hw_start_8117, - [RTL_GIGA_MAC_VER_53] = rtl_hw_start_8117, [RTL_GIGA_MAC_VER_61] = rtl_hw_start_8125a_2, [RTL_GIGA_MAC_VER_63] = rtl_hw_start_8125b, [RTL_GIGA_MAC_VER_64] = rtl_hw_start_8125d, - [RTL_GIGA_MAC_VER_65] = rtl_hw_start_8125d, [RTL_GIGA_MAC_VER_66] = rtl_hw_start_8125d, [RTL_GIGA_MAC_VER_70] = rtl_hw_start_8126a, - [RTL_GIGA_MAC_VER_71] = rtl_hw_start_8126a, + [RTL_GIGA_MAC_VER_80] = rtl_hw_start_8127a, }; if (hw_configs[tp->mac_version]) @@ -3917,14 +3865,15 @@ static void rtl_hw_start_8125(struct rtl8169_private *tp) switch (tp->mac_version) { case RTL_GIGA_MAC_VER_61: case RTL_GIGA_MAC_VER_64: - case RTL_GIGA_MAC_VER_65: case RTL_GIGA_MAC_VER_66: + case RTL_GIGA_MAC_VER_80: for (i = 0xa00; i < 0xb00; i += 4) RTL_W32(tp, i, 0); + if (tp->mac_version == RTL_GIGA_MAC_VER_80) + RTL_W16(tp, INT_CFG1_8125, 0x0000); break; case RTL_GIGA_MAC_VER_63: case RTL_GIGA_MAC_VER_70: - case RTL_GIGA_MAC_VER_71: for (i = 0xa00; i < 0xa80; i += 4) RTL_W32(tp, i, 0); RTL_W16(tp, INT_CFG1_8125, 0x0000); @@ -4156,7 +4105,7 @@ static void rtl8169_cleanup(struct rtl8169_private *tp) RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq); rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666); break; - case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_71: + case RTL_GIGA_MAC_VER_40 ... 
RTL_GIGA_MAC_VER_LAST: rtl_enable_rxdvgate(tp); fsleep(2000); break; @@ -4313,7 +4262,7 @@ static unsigned int rtl_quirk_packet_padto(struct rtl8169_private *tp, switch (tp->mac_version) { case RTL_GIGA_MAC_VER_34: - case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_71: + case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_LAST: padto = max_t(unsigned int, padto, ETH_ZLEN); break; default: @@ -5101,10 +5050,8 @@ static void rtl_shutdown(struct pci_dev *pdev) /* Restore original MAC address */ rtl_rar_set(tp, tp->dev->perm_addr); - if (system_state == SYSTEM_POWER_OFF && !tp->dash_enabled) { - pci_wake_from_d3(pdev, tp->saved_wolopts); - pci_set_power_state(pdev, PCI_D3hot); - } + if (system_state == SYSTEM_POWER_OFF && !tp->dash_enabled) + pci_prepare_to_sleep(pdev); } static void rtl_remove_one(struct pci_dev *pdev) @@ -5358,13 +5305,13 @@ static void rtl_hw_init_8125(struct rtl8169_private *tp) static void rtl_hw_initialize(struct rtl8169_private *tp) { switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_51 ... RTL_GIGA_MAC_VER_53: + case RTL_GIGA_MAC_VER_51 ... RTL_GIGA_MAC_VER_52: rtl8168ep_stop_cmac(tp); fallthrough; case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48: rtl_hw_init_8168g(tp); break; - case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_71: + case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_LAST: rtl_hw_init_8125(tp); break; default: @@ -5389,7 +5336,7 @@ static int rtl_jumbo_max(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24: return JUMBO_6K; /* RTL8125/8126 */ - case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_71: + case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_LAST: return JUMBO_16K; default: return JUMBO_9K; @@ -5434,9 +5381,9 @@ static bool rtl_aspm_is_safe(struct rtl8169_private *tp) static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { + const struct rtl_chip_info *chip; struct rtl8169_private *tp; int jumbo_max, region, rc; - enum mac_version chipset; struct net_device *dev; u32 txconfig; u16 xid; @@ -5486,12 +5433,13 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) xid = (txconfig >> 20) & 0xfcf; /* Identify chip attached to board */ - chipset = rtl8169_get_mac_version(xid, tp->supports_gmii); - if (chipset == RTL_GIGA_MAC_NONE) + chip = rtl8169_get_chip_version(xid, tp->supports_gmii); + if (chip->mac_version == RTL_GIGA_MAC_NONE) return dev_err_probe(&pdev->dev, -ENODEV, "unknown chip XID %03x, contact r8169 maintainers (see MAINTAINERS file)\n", xid); - tp->mac_version = chipset; + tp->mac_version = chip->mac_version; + tp->fw_name = chip->fw_name; /* Disable ASPM L1 as that cause random device stop working * problems as well as full system hangs for some PCIe devices users. 
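
As context for the hunk above: rtl8169_get_chip_version() now does a single linear scan of the consolidated rtl_chip_infos[] table until (xid & mask) == val, relying on the final { 0x000, 0x000 } catch-all entry to terminate the scan. The stand-alone sketch below only illustrates that lookup pattern; struct chip_entry, the two sample table entries and main() are hypothetical stand-ins for this illustration, not driver code from the patch.

#include <stdint.h>
#include <stdio.h>

struct chip_entry {                     /* illustrative, mirrors rtl_chip_info */
	uint16_t mask;
	uint16_t val;
	const char *name;
};

static const struct chip_entry chips[] = {
	{ 0x7cf, 0x641, "RTL8125B" },   /* more specific entries come first */
	{ 0x7cf, 0x609, "RTL8125A" },
	{ 0x000, 0x000, "unknown XID" } /* catch-all: matches any XID */
};

static const struct chip_entry *lookup_chip(uint16_t xid)
{
	const struct chip_entry *p = chips;

	/* The catch-all terminator guarantees this loop always stops. */
	while ((xid & p->mask) != p->val)
		p++;

	return p;
}

int main(void)
{
	/* 0x641 & 0x7cf == 0x641, so this prints "RTL8125B". */
	printf("%s\n", lookup_chip(0x641)->name);
	return 0;
}
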
@@ -5596,8 +5544,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) rtl_set_irq_mask(tp); - tp->fw_name = rtl_chip_infos[chipset].fw_name; - tp->counters = dmam_alloc_coherent (&pdev->dev, sizeof(*tp->counters), &tp->counters_phys_addr, GFP_KERNEL); @@ -5622,7 +5568,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) } netdev_info(dev, "%s, %pM, XID %03x, IRQ %d\n", - rtl_chip_infos[chipset].name, dev->dev_addr, xid, tp->irq); + chip->name, dev->dev_addr, xid, tp->irq); if (jumbo_max) netdev_info(dev, "jumbo features [frames: %d bytes, tx checksumming: %s]\n", diff --git a/drivers/net/ethernet/realtek/r8169_phy_config.c b/drivers/net/ethernet/realtek/r8169_phy_config.c index cf95e579c65d..032d9d2cfa2a 100644 --- a/drivers/net/ethernet/realtek/r8169_phy_config.c +++ b/drivers/net/ethernet/realtek/r8169_phy_config.c @@ -50,6 +50,15 @@ static void r8168g_phy_param(struct phy_device *phydev, u16 parm, phy_restore_page(phydev, oldpage, 0); } +static void rtl8125_phy_param(struct phy_device *phydev, u16 parm, + u16 mask, u16 val) +{ + phy_lock_mdio_bus(phydev); + __phy_write_mmd(phydev, MDIO_MMD_VEND2, 0xb87c, parm); + __phy_modify_mmd(phydev, MDIO_MMD_VEND2, 0xb87e, mask, val); + phy_unlock_mdio_bus(phydev); +} + struct phy_reg { u16 reg; u16 val; @@ -1004,12 +1013,8 @@ static void rtl8125a_2_hw_phy_config(struct rtl8169_private *tp, phy_write_paged(phydev, 0xac5, 0x16, 0x01ff); phy_modify_paged(phydev, 0xac8, 0x15, 0x00f0, 0x0030); - phy_write(phydev, 0x1f, 0x0b87); - phy_write(phydev, 0x16, 0x80a2); - phy_write(phydev, 0x17, 0x0153); - phy_write(phydev, 0x16, 0x809c); - phy_write(phydev, 0x17, 0x0153); - phy_write(phydev, 0x1f, 0x0000); + rtl8125_phy_param(phydev, 0x80a2, 0xffff, 0x0153); + rtl8125_phy_param(phydev, 0x809c, 0xffff, 0x0153); phy_write(phydev, 0x1f, 0x0a43); phy_write(phydev, 0x13, 0x81B3); @@ -1061,14 +1066,9 @@ static void rtl8125b_hw_phy_config(struct rtl8169_private *tp, phy_modify_paged(phydev, 0xac4, 0x13, 0x00f0, 0x0090); phy_modify_paged(phydev, 0xad3, 0x10, 0x0003, 0x0001); - phy_write(phydev, 0x1f, 0x0b87); - phy_write(phydev, 0x16, 0x80f5); - phy_write(phydev, 0x17, 0x760e); - phy_write(phydev, 0x16, 0x8107); - phy_write(phydev, 0x17, 0x360e); - phy_write(phydev, 0x16, 0x8551); - phy_modify(phydev, 0x17, 0xff00, 0x0800); - phy_write(phydev, 0x1f, 0x0000); + rtl8125_phy_param(phydev, 0x80f5, 0xffff, 0x760e); + rtl8125_phy_param(phydev, 0x8107, 0xffff, 0x360e); + rtl8125_phy_param(phydev, 0x8551, 0xff00, 0x0800); phy_modify_paged(phydev, 0xbf0, 0x10, 0xe000, 0xa000); phy_modify_paged(phydev, 0xbf4, 0x13, 0x0f00, 0x0300); @@ -1110,12 +1110,8 @@ static void rtl8125bp_hw_phy_config(struct rtl8169_private *tp, r8168g_phy_param(phydev, 0x8010, 0x0800, 0x0000); - phy_write(phydev, 0x1f, 0x0b87); - phy_write(phydev, 0x16, 0x8088); - phy_modify(phydev, 0x17, 0xff00, 0x9000); - phy_write(phydev, 0x16, 0x808f); - phy_modify(phydev, 0x17, 0xff00, 0x9000); - phy_write(phydev, 0x1f, 0x0000); + rtl8125_phy_param(phydev, 0x8088, 0xff00, 0x9000); + rtl8125_phy_param(phydev, 0x808f, 0xff00, 0x9000); r8168g_phy_param(phydev, 0x8174, 0x2000, 0x1800); @@ -1134,6 +1130,171 @@ static void rtl8126a_hw_phy_config(struct rtl8169_private *tp, rtl8125_common_config_eee_phy(phydev); } +static void rtl8127a_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + r8169_apply_firmware(tp); + rtl8168g_enable_gphy_10m(phydev); + + r8168g_phy_param(phydev, 0x8415, 0xff00, 0x9300); + r8168g_phy_param(phydev, 0x81a3, 
0xff00, 0x0f00); + r8168g_phy_param(phydev, 0x81ae, 0xff00, 0x0f00); + r8168g_phy_param(phydev, 0x81b9, 0xff00, 0xb900); + rtl8125_phy_param(phydev, 0x83b0, 0x0e00, 0x0000); + rtl8125_phy_param(phydev, 0x83C5, 0x0e00, 0x0000); + rtl8125_phy_param(phydev, 0x83da, 0x0e00, 0x0000); + rtl8125_phy_param(phydev, 0x83ef, 0x0e00, 0x0000); + phy_modify_paged(phydev, 0x0bf3, 0x14, 0x01f0, 0x0160); + phy_modify_paged(phydev, 0x0bf3, 0x15, 0x001f, 0x0014); + phy_modify_paged(phydev, 0x0bf2, 0x14, 0x6000, 0x0000); + phy_modify_paged(phydev, 0x0bf2, 0x16, 0xc000, 0x0000); + phy_modify_paged(phydev, 0x0bf2, 0x14, 0x1fff, 0x0187); + phy_modify_paged(phydev, 0x0bf2, 0x15, 0x003f, 0x0003); + + r8168g_phy_param(phydev, 0x8173, 0xffff, 0x8620); + r8168g_phy_param(phydev, 0x8175, 0xffff, 0x8671); + r8168g_phy_param(phydev, 0x817c, 0x0000, 0x2000); + r8168g_phy_param(phydev, 0x8187, 0x0000, 0x2000); + r8168g_phy_param(phydev, 0x8192, 0x0000, 0x2000); + r8168g_phy_param(phydev, 0x819d, 0x0000, 0x2000); + r8168g_phy_param(phydev, 0x81a8, 0x2000, 0x0000); + r8168g_phy_param(phydev, 0x81b3, 0x2000, 0x0000); + r8168g_phy_param(phydev, 0x81be, 0x0000, 0x2000); + r8168g_phy_param(phydev, 0x817d, 0xff00, 0xa600); + r8168g_phy_param(phydev, 0x8188, 0xff00, 0xa600); + r8168g_phy_param(phydev, 0x8193, 0xff00, 0xa600); + r8168g_phy_param(phydev, 0x819e, 0xff00, 0xa600); + r8168g_phy_param(phydev, 0x81a9, 0xff00, 0x1400); + r8168g_phy_param(phydev, 0x81b4, 0xff00, 0x1400); + r8168g_phy_param(phydev, 0x81bf, 0xff00, 0xa600); + + phy_modify_paged(phydev, 0x0aea, 0x15, 0x0028, 0x0000); + + rtl8125_phy_param(phydev, 0x84f0, 0xffff, 0x201c); + rtl8125_phy_param(phydev, 0x84f2, 0xffff, 0x3117); + + phy_write_paged(phydev, 0x0aec, 0x13, 0x0000); + phy_write_paged(phydev, 0x0ae2, 0x10, 0xffff); + phy_write_paged(phydev, 0x0aec, 0x17, 0xffff); + phy_write_paged(phydev, 0x0aed, 0x11, 0xffff); + phy_write_paged(phydev, 0x0aec, 0x14, 0x0000); + phy_modify_paged(phydev, 0x0aed, 0x10, 0x0001, 0x0000); + phy_write_paged(phydev, 0x0adb, 0x14, 0x0150); + rtl8125_phy_param(phydev, 0x8197, 0xff00, 0x5000); + rtl8125_phy_param(phydev, 0x8231, 0xff00, 0x5000); + rtl8125_phy_param(phydev, 0x82cb, 0xff00, 0x5000); + rtl8125_phy_param(phydev, 0x82cd, 0xff00, 0x5700); + rtl8125_phy_param(phydev, 0x8233, 0xff00, 0x5700); + rtl8125_phy_param(phydev, 0x8199, 0xff00, 0x5700); + + rtl8125_phy_param(phydev, 0x815a, 0xffff, 0x0150); + rtl8125_phy_param(phydev, 0x81f4, 0xffff, 0x0150); + rtl8125_phy_param(phydev, 0x828e, 0xffff, 0x0150); + rtl8125_phy_param(phydev, 0x81b1, 0xffff, 0x0000); + rtl8125_phy_param(phydev, 0x824b, 0xffff, 0x0000); + rtl8125_phy_param(phydev, 0x82e5, 0xffff, 0x0000); + + rtl8125_phy_param(phydev, 0x84f7, 0xff00, 0x2800); + phy_modify_paged(phydev, 0x0aec, 0x11, 0x0000, 0x1000); + rtl8125_phy_param(phydev, 0x81b3, 0xff00, 0xad00); + rtl8125_phy_param(phydev, 0x824d, 0xff00, 0xad00); + rtl8125_phy_param(phydev, 0x82e7, 0xff00, 0xad00); + phy_modify_paged(phydev, 0x0ae4, 0x17, 0x000f, 0x0001); + rtl8125_phy_param(phydev, 0x82ce, 0xf000, 0x4000); + + rtl8125_phy_param(phydev, 0x84ac, 0xffff, 0x0000); + rtl8125_phy_param(phydev, 0x84ae, 0xffff, 0x0000); + rtl8125_phy_param(phydev, 0x84b0, 0xffff, 0xf818); + rtl8125_phy_param(phydev, 0x84b2, 0xff00, 0x6000); + + rtl8125_phy_param(phydev, 0x8ffc, 0xffff, 0x6008); + rtl8125_phy_param(phydev, 0x8ffe, 0xffff, 0xf450); + + rtl8125_phy_param(phydev, 0x8015, 0x0000, 0x0200); + rtl8125_phy_param(phydev, 0x8016, 0x0800, 0x0000); + rtl8125_phy_param(phydev, 0x8fe6, 0xff00, 0x0800); + 
rtl8125_phy_param(phydev, 0x8fe4, 0xffff, 0x2114); + + rtl8125_phy_param(phydev, 0x8647, 0xffff, 0xa7b1); + rtl8125_phy_param(phydev, 0x8649, 0xffff, 0xbbca); + rtl8125_phy_param(phydev, 0x864b, 0xff00, 0xdc00); + + rtl8125_phy_param(phydev, 0x8154, 0xc000, 0x4000); + rtl8125_phy_param(phydev, 0x8158, 0xc000, 0x0000); + + rtl8125_phy_param(phydev, 0x826c, 0xffff, 0xffff); + rtl8125_phy_param(phydev, 0x826e, 0xffff, 0xffff); + + rtl8125_phy_param(phydev, 0x8872, 0xff00, 0x0e00); + r8168g_phy_param(phydev, 0x8012, 0x0000, 0x0800); + r8168g_phy_param(phydev, 0x8012, 0x0000, 0x4000); + phy_modify_paged(phydev, 0x0b57, 0x13, 0x0000, 0x0001); + r8168g_phy_param(phydev, 0x834a, 0xff00, 0x0700); + rtl8125_phy_param(phydev, 0x8217, 0x3f00, 0x2a00); + r8168g_phy_param(phydev, 0x81b1, 0xff00, 0x0b00); + rtl8125_phy_param(phydev, 0x8fed, 0xff00, 0x4e00); + + rtl8125_phy_param(phydev, 0x88ac, 0xff00, 0x2300); + phy_modify_paged(phydev, 0x0bf0, 0x16, 0x0000, 0x3800); + rtl8125_phy_param(phydev, 0x88de, 0xff00, 0x0000); + rtl8125_phy_param(phydev, 0x80b4, 0xffff, 0x5195); + + r8168g_phy_param(phydev, 0x8370, 0xffff, 0x8671); + r8168g_phy_param(phydev, 0x8372, 0xffff, 0x86c8); + + r8168g_phy_param(phydev, 0x8401, 0xffff, 0x86c8); + r8168g_phy_param(phydev, 0x8403, 0xffff, 0x86da); + r8168g_phy_param(phydev, 0x8406, 0x1800, 0x1000); + r8168g_phy_param(phydev, 0x8408, 0x1800, 0x1000); + r8168g_phy_param(phydev, 0x840a, 0x1800, 0x1000); + r8168g_phy_param(phydev, 0x840c, 0x1800, 0x1000); + r8168g_phy_param(phydev, 0x840e, 0x1800, 0x1000); + r8168g_phy_param(phydev, 0x8410, 0x1800, 0x1000); + r8168g_phy_param(phydev, 0x8412, 0x1800, 0x1000); + r8168g_phy_param(phydev, 0x8414, 0x1800, 0x1000); + r8168g_phy_param(phydev, 0x8416, 0x1800, 0x1000); + + r8168g_phy_param(phydev, 0x82bd, 0xffff, 0x1f40); + + phy_modify_paged(phydev, 0x0bfb, 0x12, 0x07ff, 0x0328); + phy_write_paged(phydev, 0x0bfb, 0x13, 0x3e14); + + r8168g_phy_param(phydev, 0x81c4, 0xffff, 0x003b); + r8168g_phy_param(phydev, 0x81c6, 0xffff, 0x0086); + r8168g_phy_param(phydev, 0x81c8, 0xffff, 0x00b7); + r8168g_phy_param(phydev, 0x81ca, 0xffff, 0x00db); + r8168g_phy_param(phydev, 0x81cc, 0xffff, 0x00fe); + r8168g_phy_param(phydev, 0x81ce, 0xffff, 0x00fe); + r8168g_phy_param(phydev, 0x81d0, 0xffff, 0x00fe); + r8168g_phy_param(phydev, 0x81d2, 0xffff, 0x00fe); + r8168g_phy_param(phydev, 0x81d4, 0xffff, 0x00c3); + r8168g_phy_param(phydev, 0x81d6, 0xffff, 0x0078); + r8168g_phy_param(phydev, 0x81d8, 0xffff, 0x0047); + r8168g_phy_param(phydev, 0x81da, 0xffff, 0x0023); + + rtl8125_phy_param(phydev, 0x88d7, 0xffff, 0x01a0); + rtl8125_phy_param(phydev, 0x88d9, 0xffff, 0x01a0); + rtl8125_phy_param(phydev, 0x8ffa, 0xffff, 0x002a); + + rtl8125_phy_param(phydev, 0x8fee, 0xffff, 0xffdf); + rtl8125_phy_param(phydev, 0x8ff0, 0xffff, 0xffff); + rtl8125_phy_param(phydev, 0x8ff2, 0xffff, 0x0a4a); + rtl8125_phy_param(phydev, 0x8ff4, 0xffff, 0xaa5a); + rtl8125_phy_param(phydev, 0x8ff6, 0xffff, 0x0a4a); + + rtl8125_phy_param(phydev, 0x8ff8, 0xffff, 0xaa5a); + rtl8125_phy_param(phydev, 0x88d5, 0xff00, 0x0200); + + r8168g_phy_param(phydev, 0x84bb, 0xff00, 0x0a00); + r8168g_phy_param(phydev, 0x84c0, 0xff00, 0x1600); + + phy_modify_paged(phydev, 0x0a43, 0x10, 0x0000, 0x0003); + + rtl8125_legacy_force_mode(phydev); + rtl8168g_disable_aldps(phydev); + rtl8125_common_config_eee_phy(phydev); +} + void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev, enum mac_version ver) { @@ -1180,14 +1341,12 @@ void r8169_hw_phy_config(struct rtl8169_private *tp, struct 
phy_device *phydev, [RTL_GIGA_MAC_VER_48] = rtl8168h_2_hw_phy_config, [RTL_GIGA_MAC_VER_51] = rtl8168ep_2_hw_phy_config, [RTL_GIGA_MAC_VER_52] = rtl8117_hw_phy_config, - [RTL_GIGA_MAC_VER_53] = rtl8117_hw_phy_config, [RTL_GIGA_MAC_VER_61] = rtl8125a_2_hw_phy_config, [RTL_GIGA_MAC_VER_63] = rtl8125b_hw_phy_config, [RTL_GIGA_MAC_VER_64] = rtl8125d_hw_phy_config, - [RTL_GIGA_MAC_VER_65] = rtl8125d_hw_phy_config, [RTL_GIGA_MAC_VER_66] = rtl8125bp_hw_phy_config, [RTL_GIGA_MAC_VER_70] = rtl8126a_hw_phy_config, - [RTL_GIGA_MAC_VER_71] = rtl8126a_hw_phy_config, + [RTL_GIGA_MAC_VER_80] = rtl8127a_1_hw_phy_config, }; if (phy_configs[ver]) diff --git a/drivers/net/ethernet/realtek/rtase/rtase.h b/drivers/net/ethernet/realtek/rtase/rtase.h index 2bbfcad613ab..498cfe4d0cac 100644 --- a/drivers/net/ethernet/realtek/rtase/rtase.h +++ b/drivers/net/ethernet/realtek/rtase/rtase.h @@ -170,6 +170,7 @@ enum rtase_registers { #define RTASE_TC_MODE_MASK GENMASK(11, 10) RTASE_TOKSEL = 0x2046, + RTASE_TXQCRDT_0 = 0x2500, RTASE_RFIFONFULL = 0x4406, RTASE_INT_MITI_TX = 0x0A00, RTASE_INT_MITI_RX = 0x0A80, @@ -259,6 +260,12 @@ union rtase_rx_desc { #define RTASE_VLAN_TAG_MASK GENMASK(15, 0) #define RTASE_RX_PKT_SIZE_MASK GENMASK(13, 0) +/* txqos hardware definitions */ +#define RTASE_1T_CLOCK 64 +#define RTASE_1T_POWER 10000000 +#define RTASE_IDLESLOPE_INT_SHIFT 25 +#define RTASE_IDLESLOPE_INT_MASK GENMASK(31, 25) + #define RTASE_IVEC_NAME_SIZE (IFNAMSIZ + 10) struct rtase_int_vector { @@ -294,6 +301,13 @@ struct rtase_ring { u64 alloc_fail; }; +struct rtase_txqos { + int hicredit; + int locredit; + int idleslope; + int sendslope; +}; + struct rtase_stats { u64 tx_dropped; u64 rx_dropped; @@ -313,6 +327,7 @@ struct rtase_private { struct page_pool *page_pool; struct rtase_ring tx_ring[RTASE_NUM_TX_QUEUE]; + struct rtase_txqos tx_qos[RTASE_NUM_TX_QUEUE]; struct rtase_ring rx_ring[RTASE_NUM_RX_QUEUE]; struct rtase_counters *tally_vaddr; dma_addr_t tally_paddr; diff --git a/drivers/net/ethernet/realtek/rtase/rtase_main.c b/drivers/net/ethernet/realtek/rtase/rtase_main.c index 55b8d3666153..4d37217e9a14 100644 --- a/drivers/net/ethernet/realtek/rtase/rtase_main.c +++ b/drivers/net/ethernet/realtek/rtase/rtase_main.c @@ -1114,7 +1114,7 @@ static int rtase_open(struct net_device *dev) /* request other interrupts to handle multiqueue */ for (i = 1; i < tp->int_nums; i++) { ivec = &tp->int_vector[i]; - snprintf(ivec->name, sizeof(ivec->name), "%s_int%i", + snprintf(ivec->name, sizeof(ivec->name), "%s_int%u", tp->dev->name, i); ret = request_irq(ivec->irq, rtase_q_interrupt, 0, ivec->name, ivec); @@ -1661,6 +1661,65 @@ static void rtase_get_stats64(struct net_device *dev, stats->rx_length_errors = tp->stats.rx_length_errors; } +static void rtase_set_hw_cbs(const struct rtase_private *tp, u32 queue) +{ + u32 idle = tp->tx_qos[queue].idleslope * RTASE_1T_CLOCK; + u32 val, i; + + val = u32_encode_bits(idle / RTASE_1T_POWER, RTASE_IDLESLOPE_INT_MASK); + idle %= RTASE_1T_POWER; + + for (i = 1; i <= RTASE_IDLESLOPE_INT_SHIFT; i++) { + idle *= 2; + if ((idle / RTASE_1T_POWER) == 1) + val |= BIT(RTASE_IDLESLOPE_INT_SHIFT - i); + + idle %= RTASE_1T_POWER; + } + + rtase_w32(tp, RTASE_TXQCRDT_0 + queue * 4, val); +} + +static int rtase_setup_tc_cbs(struct rtase_private *tp, + const struct tc_cbs_qopt_offload *qopt) +{ + int queue = qopt->queue; + + if (queue < 0 || queue >= tp->func_tx_queue_num) + return -EINVAL; + + if (!qopt->enable) { + tp->tx_qos[queue].hicredit = 0; + tp->tx_qos[queue].locredit = 0; + tp->tx_qos[queue].idleslope = 
0; + tp->tx_qos[queue].sendslope = 0; + + rtase_w32(tp, RTASE_TXQCRDT_0 + queue * 4, 0); + } else { + tp->tx_qos[queue].hicredit = qopt->hicredit; + tp->tx_qos[queue].locredit = qopt->locredit; + tp->tx_qos[queue].idleslope = qopt->idleslope; + tp->tx_qos[queue].sendslope = qopt->sendslope; + + rtase_set_hw_cbs(tp, queue); + } + + return 0; +} + +static int rtase_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) +{ + struct rtase_private *tp = netdev_priv(dev); + + switch (type) { + case TC_SETUP_QDISC_CBS: + return rtase_setup_tc_cbs(tp, type_data); + default: + return -EOPNOTSUPP; + } +} + static netdev_features_t rtase_fix_features(struct net_device *dev, netdev_features_t features) { @@ -1696,6 +1755,7 @@ static const struct net_device_ops rtase_netdev_ops = { .ndo_change_mtu = rtase_change_mtu, .ndo_tx_timeout = rtase_tx_timeout, .ndo_get_stats64 = rtase_get_stats64, + .ndo_setup_tc = rtase_setup_tc, .ndo_fix_features = rtase_fix_features, .ndo_set_features = rtase_set_features, }; @@ -1923,7 +1983,7 @@ static u16 rtase_calc_time_mitigation(u32 time_us) u8 msb, time_count, time_unit; u16 int_miti; - time_us = min_t(int, time_us, RTASE_MITI_MAX_TIME); + time_us = min(time_us, RTASE_MITI_MAX_TIME); if (time_us > RTASE_MITI_TIME_COUNT_MASK) { msb = fls(time_us); @@ -1945,7 +2005,7 @@ static u16 rtase_calc_packet_num_mitigation(u16 pkt_num) u8 msb, pkt_num_count, pkt_num_unit; u16 int_miti; - pkt_num = min_t(int, pkt_num, RTASE_MITI_MAX_PKT_NUM); + pkt_num = min(pkt_num, RTASE_MITI_MAX_PKT_NUM); if (pkt_num > 60) { pkt_num_unit = RTASE_MITI_MAX_PKT_NUM_IDX; diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c index b4365906669f..226c6c0ab945 100644 --- a/drivers/net/ethernet/renesas/ravb_ptp.c +++ b/drivers/net/ethernet/renesas/ravb_ptp.c @@ -176,12 +176,6 @@ static int ravb_ptp_extts(struct ptp_clock_info *ptp, struct net_device *ndev = priv->ndev; unsigned long flags; - /* Reject requests with unsupported flags */ - if (req->flags & ~(PTP_ENABLE_FEATURE | - PTP_RISING_EDGE | - PTP_FALLING_EDGE)) - return -EOPNOTSUPP; - if (req->index) return -EINVAL; @@ -212,10 +206,6 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp, unsigned long flags; int error = 0; - /* Reject requests with unsupported flags */ - if (req->flags) - return -EOPNOTSUPP; - if (req->index) return -EINVAL; @@ -287,6 +277,7 @@ static const struct ptp_clock_info ravb_ptp_info = { .max_adj = 50000000, .n_ext_ts = N_EXT_TS, .n_per_out = N_PER_OUT, + .supported_extts_flags = PTP_RISING_EDGE | PTP_FALLING_EDGE, .adjfine = ravb_ptp_adjfine, .adjtime = ravb_ptp_adjtime, .gettime64 = ravb_ptp_gettime64, diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c index d5db26103d82..19985d13e243 100644 --- a/drivers/net/ethernet/rocker/rocker_ofdpa.c +++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c @@ -1933,7 +1933,7 @@ static int ofdpa_port_fdb(struct ofdpa_port *ofdpa_port, spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags); /* Check if adding and already exists, or removing and can't find */ - if (!found != !removing) { + if (!found == removing) { kfree(fdb); if (!found && removing) return 0; diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c index 332cbd725900..df869f82cae8 100644 --- a/drivers/net/ethernet/sis/sis900.c +++ b/drivers/net/ethernet/sis/sis900.c @@ -468,7 +468,7 @@ static int sis900_probe(struct pci_dev *pci_dev, SET_NETDEV_DEV(net_dev, &pci_dev->dev); /* We do 
a request_region() to register /proc/ioports info. */ - ret = pci_request_regions(pci_dev, "sis900"); + ret = pcim_request_all_regions(pci_dev, "sis900"); if (ret) goto err_out; diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index 3c820ef56775..67fa879b1e52 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -3,6 +3,7 @@ config STMMAC_ETH tristate "STMicroelectronics Multi-Gigabit Ethernet driver" depends on HAS_IOMEM && HAS_DMA depends on PTP_1588_CLOCK_OPTIONAL + depends on ETHTOOL_NETLINK select MII select PCS_XPCS select PAGE_POOL @@ -131,6 +132,17 @@ config DWMAC_QCOM_ETHQOS This selects the Qualcomm ETHQOS glue layer support for the stmmac device driver. +config DWMAC_RENESAS_GBETH + tristate "Renesas RZ/V2H(P) GBETH support" + default ARCH_RENESAS + depends on OF && (ARCH_RENESAS || COMPILE_TEST) + help + Support for Gigabit Ethernet Interface (GBETH) on Renesas + RZ/V2H(P) SoCs. + + This selects the Renesas RZ/V2H(P) Soc specific glue layer support + for the stmmac device driver. + config DWMAC_ROCKCHIP tristate "Rockchip dwmac support" default ARCH_ROCKCHIP diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile index 594883fb4164..b591d93f8503 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Makefile +++ b/drivers/net/ethernet/stmicro/stmmac/Makefile @@ -6,7 +6,7 @@ stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \ mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o dwmac4_descs.o \ dwmac4_dma.o dwmac4_lib.o dwmac4_core.o dwmac5.o hwif.o \ stmmac_tc.o dwxgmac2_core.o dwxgmac2_dma.o dwxgmac2_descs.o \ - stmmac_xdp.o stmmac_est.o stmmac_fpe.o \ + stmmac_xdp.o stmmac_est.o stmmac_fpe.o stmmac_vlan.o \ $(stmmac-y) stmmac-$(CONFIG_STMMAC_SELFTESTS) += stmmac_selftests.o @@ -20,6 +20,7 @@ obj-$(CONFIG_DWMAC_LPC18XX) += dwmac-lpc18xx.o obj-$(CONFIG_DWMAC_MEDIATEK) += dwmac-mediatek.o obj-$(CONFIG_DWMAC_MESON) += dwmac-meson.o dwmac-meson8b.o obj-$(CONFIG_DWMAC_QCOM_ETHQOS) += dwmac-qcom-ethqos.o +obj-$(CONFIG_DWMAC_RENESAS_GBETH) += dwmac-renesas-gbeth.o obj-$(CONFIG_DWMAC_ROCKCHIP) += dwmac-rk.o obj-$(CONFIG_DWMAC_RZN1) += dwmac-rzn1.o obj-$(CONFIG_DWMAC_S32) += dwmac-s32.o diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 412b07e77945..ea5da5793362 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -602,6 +602,7 @@ struct mac_device_info { const struct stmmac_tc_ops *tc; const struct stmmac_mmc_ops *mmc; const struct stmmac_est_ops *est; + const struct stmmac_vlan_ops *vlan; struct dw_xpcs *xpcs; struct phylink_pcs *phylink_pcs; struct mii_regs mii; /* MII register Addresses */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c index 37fe7c288878..84072c8ed741 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c @@ -65,13 +65,12 @@ anarion_config_dt(struct platform_device *pdev, { struct anarion_gmac *gmac; void __iomem *ctl_block; - int err; ctl_block = devm_platform_ioremap_resource(pdev, 1); if (IS_ERR(ctl_block)) { - err = PTR_ERR(ctl_block); - dev_err(&pdev->dev, "Cannot get reset region (%d)!\n", err); - return ERR_PTR(err); + dev_err(&pdev->dev, "Cannot get reset region (%pe)!\n", + ctl_block); + return ERR_CAST(ctl_block); } gmac = 
devm_kzalloc(&pdev->dev, sizeof(*gmac), GFP_KERNEL); @@ -80,17 +79,11 @@ anarion_config_dt(struct platform_device *pdev, gmac->ctl_block = ctl_block; - switch (plat_dat->phy_interface) { - case PHY_INTERFACE_MODE_RGMII: - fallthrough; - case PHY_INTERFACE_MODE_RGMII_ID: - case PHY_INTERFACE_MODE_RGMII_RXID: - case PHY_INTERFACE_MODE_RGMII_TXID: + if (phy_interface_mode_is_rgmii(plat_dat->phy_interface)) { gmac->phy_intf_sel = GMAC_CONFIG_INTF_RGMII; - break; - default: - dev_err(&pdev->dev, "Unsupported phy-mode (%d)\n", - plat_dat->phy_interface); + } else { + dev_err(&pdev->dev, "Unsupported phy-mode (%s)\n", + phy_modes(plat_dat->phy_interface)); return ERR_PTR(-ENOTSUPP); } @@ -118,10 +111,9 @@ static int anarion_dwmac_probe(struct platform_device *pdev) plat_dat->init = anarion_gmac_init; plat_dat->exit = anarion_gmac_exit; - anarion_gmac_init(pdev, gmac); plat_dat->bsp_priv = gmac; - return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); + return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res); } static const struct of_device_id anarion_dwmac_match[] = { @@ -132,7 +124,6 @@ MODULE_DEVICE_TABLE(of, anarion_dwmac_match); static struct platform_driver anarion_dwmac_driver = { .probe = anarion_dwmac_probe, - .remove = stmmac_pltfr_remove, .driver = { .name = "anarion-dwmac", .pm = &stmmac_pltfr_pm_ops, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c index cd431f84f34f..09ae16e026eb 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c @@ -29,21 +29,10 @@ struct tegra_eqos { void __iomem *regs; struct reset_control *rst; - struct clk *clk_slave; struct gpio_desc *reset; }; -static struct clk *dwc_eth_find_clk(struct plat_stmmacenet_data *plat_dat, - const char *name) -{ - for (int i = 0; i < plat_dat->num_clks; i++) - if (strcmp(plat_dat->clks[i].id, name) == 0) - return plat_dat->clks[i].clk; - - return NULL; -} - static int dwc_eth_dwmac_config_dt(struct platform_device *pdev, struct plat_stmmacenet_data *plat_dat) { @@ -132,7 +121,7 @@ static int dwc_qos_probe(struct platform_device *pdev, struct plat_stmmacenet_data *plat_dat, struct stmmac_resources *stmmac_res) { - plat_dat->pclk = dwc_eth_find_clk(plat_dat, "phy_ref_clk"); + plat_dat->pclk = stmmac_pltfr_find_clk(plat_dat, "phy_ref_clk"); return 0; } @@ -147,10 +136,11 @@ static int dwc_qos_probe(struct platform_device *pdev, #define AUTO_CAL_STATUS 0x880c #define AUTO_CAL_STATUS_ACTIVE BIT(31) -static void tegra_eqos_fix_speed(void *priv, int speed, unsigned int mode) +static void tegra_eqos_fix_speed(void *bsp_priv, int speed, unsigned int mode) { - struct tegra_eqos *eqos = priv; + struct tegra_eqos *eqos = bsp_priv; bool needs_calibration = false; + struct stmmac_priv *priv; u32 value; int err; @@ -169,6 +159,11 @@ static void tegra_eqos_fix_speed(void *priv, int speed, unsigned int mode) } if (needs_calibration) { + priv = netdev_priv(dev_get_drvdata(eqos->dev)); + + /* Calibration should be done with the MDIO bus idle */ + mutex_lock(&priv->mii->mdio_lock); + /* calibrate */ value = readl(eqos->regs + SDMEMCOMPPADCTRL); value |= SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD; @@ -202,6 +197,8 @@ static void tegra_eqos_fix_speed(void *priv, int speed, unsigned int mode) value = readl(eqos->regs + SDMEMCOMPPADCTRL); value &= ~SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD; writel(value, eqos->regs + SDMEMCOMPPADCTRL); + + mutex_unlock(&priv->mii->mdio_lock); } else { value = 
readl(eqos->regs + AUTO_CAL_CONFIG); value &= ~AUTO_CAL_CONFIG_ENABLE; @@ -209,20 +206,6 @@ static void tegra_eqos_fix_speed(void *priv, int speed, unsigned int mode) } } -static int tegra_eqos_init(struct platform_device *pdev, void *priv) -{ - struct tegra_eqos *eqos = priv; - unsigned long rate; - u32 value; - - rate = clk_get_rate(eqos->clk_slave); - - value = (rate / 1000000) - 1; - writel(value, eqos->regs + GMAC_1US_TIC_COUNTER); - - return 0; -} - static int tegra_eqos_probe(struct platform_device *pdev, struct plat_stmmacenet_data *plat_dat, struct stmmac_resources *res) @@ -237,12 +220,11 @@ static int tegra_eqos_probe(struct platform_device *pdev, eqos->dev = &pdev->dev; eqos->regs = res->addr; - eqos->clk_slave = plat_dat->stmmac_clk; if (!is_of_node(dev->fwnode)) goto bypass_clk_reset_gpio; - plat_dat->clk_tx_i = dwc_eth_find_clk(plat_dat, "tx"); + plat_dat->clk_tx_i = stmmac_pltfr_find_clk(plat_dat, "tx"); eqos->reset = devm_gpiod_get(&pdev->dev, "phy-reset", GPIOD_OUT_HIGH); if (IS_ERR(eqos->reset)) { @@ -277,17 +259,12 @@ static int tegra_eqos_probe(struct platform_device *pdev, bypass_clk_reset_gpio: plat_dat->fix_mac_speed = tegra_eqos_fix_speed; plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate; - plat_dat->init = tegra_eqos_init; plat_dat->bsp_priv = eqos; - plat_dat->flags |= STMMAC_FLAG_SPH_DISABLE; - - err = tegra_eqos_init(pdev, eqos); - if (err < 0) - goto reset; + plat_dat->flags |= STMMAC_FLAG_SPH_DISABLE | + STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP; return 0; -reset: - reset_control_assert(eqos->rst); + reset_phy: gpiod_set_value(eqos->reset, 1); @@ -362,8 +339,8 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev) if (ret) return dev_err_probe(&pdev->dev, ret, "Failed to enable clocks\n"); - plat_dat->stmmac_clk = dwc_eth_find_clk(plat_dat, - data->stmmac_clk_name); + plat_dat->stmmac_clk = stmmac_pltfr_find_clk(plat_dat, + data->stmmac_clk_name); if (data->probe) ret = data->probe(pdev, plat_dat, &stmmac_res); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c index 5d279fa54b3e..889e2bb6f7f5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c @@ -379,10 +379,6 @@ static int imx_dwmac_probe(struct platform_device *pdev) if (ret) return ret; - ret = imx_dwmac_init(pdev, dwmac); - if (ret) - goto err_dwmac_init; - if (dwmac->ops->fix_mac_speed) { plat_dat->fix_mac_speed = dwmac->ops->fix_mac_speed; } else if (!dwmac->ops->mac_rgmii_txclk_auto_adj) { @@ -392,16 +388,10 @@ static int imx_dwmac_probe(struct platform_device *pdev) dwmac->plat_dat->fix_soc_reset = dwmac->ops->fix_soc_reset; - ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); + ret = stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res); if (ret) - goto err_drv_probe; - - return 0; + imx_dwmac_clks_config(dwmac, false); -err_drv_probe: - imx_dwmac_exit(pdev, plat_dat->bsp_priv); -err_dwmac_init: - imx_dwmac_clks_config(dwmac, false); return ret; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c index 066783d66422..15abe214131f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c @@ -56,6 +56,7 @@ enum ingenic_mac_version { struct ingenic_mac { const struct ingenic_soc_info *soc_info; + struct plat_stmmacenet_data *plat_dat; struct device *dev; struct regmap *regmap; @@ -70,13 +71,13 @@ struct ingenic_soc_info { int (*set_mode)(struct 
plat_stmmacenet_data *plat_dat); }; -static int ingenic_mac_init(struct plat_stmmacenet_data *plat_dat) +static int ingenic_mac_init(struct platform_device *pdev, void *bsp_priv) { - struct ingenic_mac *mac = plat_dat->bsp_priv; + struct ingenic_mac *mac = bsp_priv; int ret; if (mac->soc_info->set_mode) { - ret = mac->soc_info->set_mode(plat_dat); + ret = mac->soc_info->set_mode(mac->plat_dat); if (ret) return ret; } @@ -284,44 +285,14 @@ static int ingenic_mac_probe(struct platform_device *pdev) mac->soc_info = data; mac->dev = &pdev->dev; + mac->plat_dat = plat_dat; plat_dat->bsp_priv = mac; + plat_dat->init = ingenic_mac_init; - ret = ingenic_mac_init(plat_dat); - if (ret) - return ret; - - return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); -} - -#ifdef CONFIG_PM_SLEEP -static int ingenic_mac_suspend(struct device *dev) -{ - int ret; - - ret = stmmac_suspend(dev); - - return ret; + return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res); } -static int ingenic_mac_resume(struct device *dev) -{ - struct net_device *ndev = dev_get_drvdata(dev); - struct stmmac_priv *priv = netdev_priv(ndev); - int ret; - - ret = ingenic_mac_init(priv->plat); - if (ret) - return ret; - - ret = stmmac_resume(dev); - - return ret; -} -#endif /* CONFIG_PM_SLEEP */ - -static SIMPLE_DEV_PM_OPS(ingenic_mac_pm_ops, ingenic_mac_suspend, ingenic_mac_resume); - static struct ingenic_soc_info jz4775_soc_info = { .version = ID_JZ4775, .mask = MACPHYC_TXCLK_SEL_MASK | MACPHYC_SOFT_RST_MASK | MACPHYC_PHY_INFT_MASK, @@ -370,10 +341,9 @@ MODULE_DEVICE_TABLE(of, ingenic_mac_of_matches); static struct platform_driver ingenic_mac_driver = { .probe = ingenic_mac_probe, - .remove = stmmac_pltfr_remove, .driver = { .name = "ingenic-mac", - .pm = pm_ptr(&ingenic_mac_pm_ops), + .pm = &stmmac_pltfr_pm_ops, .of_match_table = ingenic_mac_of_matches, }, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c index 599def7b3a64..4ea7b0a803d7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c @@ -113,16 +113,7 @@ static int intel_eth_plat_probe(struct platform_device *pdev) plat_dat->clk_tx_i = dwmac->tx_clk; plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate; - plat_dat->bsp_priv = dwmac; - plat_dat->eee_usecs_rate = plat_dat->clk_ptp_rate; - - if (plat_dat->eee_usecs_rate > 0) { - u32 tx_lpi_usec; - - tx_lpi_usec = (plat_dat->eee_usecs_rate / 1000000) - 1; - writel(tx_lpi_usec, stmmac_res.addr + GMAC_1US_TIC_COUNTER); - } ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); if (ret) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c index c8bb9265bbb4..9a47015254bb 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c @@ -284,28 +284,28 @@ static void intel_serdes_powerdown(struct net_device *ndev, void *intel_data) } } -static void intel_speed_mode_2500(struct net_device *ndev, void *intel_data) +static void tgl_get_interfaces(struct stmmac_priv *priv, void *bsp_priv, + unsigned long *interfaces) { - struct intel_priv_data *intel_priv = intel_data; - struct stmmac_priv *priv = netdev_priv(ndev); - int serdes_phy_addr = 0; - u32 data = 0; - - serdes_phy_addr = intel_priv->mdio_adhoc_addr; + struct intel_priv_data *intel_priv = bsp_priv; + phy_interface_t interface; + int data; /* Determine the link speed mode: 2.5Gbps/1Gbps */ - data = 
mdiobus_read(priv->mii, serdes_phy_addr, - SERDES_GCR); + data = mdiobus_read(priv->mii, intel_priv->mdio_adhoc_addr, SERDES_GCR); + if (data < 0) + return; - if (((data & SERDES_LINK_MODE_MASK) >> SERDES_LINK_MODE_SHIFT) == - SERDES_LINK_MODE_2G5) { + if (FIELD_GET(SERDES_LINK_MODE_MASK, data) == SERDES_LINK_MODE_2G5) { dev_info(priv->device, "Link Speed Mode: 2.5Gbps\n"); - priv->plat->max_speed = 2500; - priv->plat->phy_interface = PHY_INTERFACE_MODE_2500BASEX; priv->plat->mdio_bus_data->default_an_inband = false; + interface = PHY_INTERFACE_MODE_2500BASEX; } else { - priv->plat->max_speed = 1000; + interface = PHY_INTERFACE_MODE_SGMII; } + + __set_bit(interface, interfaces); + priv->plat->phy_interface = interface; } /* Program PTP Clock Frequency for different variant of @@ -682,7 +682,6 @@ static int intel_mgbe_common_data(struct pci_dev *pdev, plat->axi->axi_blen[2] = 16; plat->ptp_max_adj = plat->clk_ptp_rate; - plat->eee_usecs_rate = plat->clk_ptp_rate; /* Set system clock */ sprintf(clk_name, "%s-%s", "stmmac", pci_name(pdev)); @@ -933,7 +932,7 @@ static int tgl_common_data(struct pci_dev *pdev, plat->rx_queues_to_use = 6; plat->tx_queues_to_use = 4; plat->clk_ptp_rate = 204800000; - plat->speed_mode_2500 = intel_speed_mode_2500; + plat->get_interfaces = tgl_get_interfaces; plat->safety_feat_cfg->tsoee = 1; plat->safety_feat_cfg->mrxpee = 0; @@ -952,7 +951,6 @@ static int tgl_sgmii_phy0_data(struct pci_dev *pdev, struct plat_stmmacenet_data *plat) { plat->bus_id = 1; - plat->phy_interface = PHY_INTERFACE_MODE_SGMII; plat->serdes_powerup = intel_serdes_powerup; plat->serdes_powerdown = intel_serdes_powerdown; return tgl_common_data(pdev, plat); @@ -966,7 +964,6 @@ static int tgl_sgmii_phy1_data(struct pci_dev *pdev, struct plat_stmmacenet_data *plat) { plat->bus_id = 2; - plat->phy_interface = PHY_INTERFACE_MODE_SGMII; plat->serdes_powerup = intel_serdes_powerup; plat->serdes_powerdown = intel_serdes_powerdown; return tgl_common_data(pdev, plat); @@ -980,7 +977,6 @@ static int adls_sgmii_phy0_data(struct pci_dev *pdev, struct plat_stmmacenet_data *plat) { plat->bus_id = 1; - plat->phy_interface = PHY_INTERFACE_MODE_SGMII; /* SerDes power up and power down are done in BIOS for ADL */ @@ -995,7 +991,6 @@ static int adls_sgmii_phy1_data(struct pci_dev *pdev, struct plat_stmmacenet_data *plat) { plat->bus_id = 2; - plat->phy_interface = PHY_INTERFACE_MODE_SGMII; /* SerDes power up and power down are done in BIOS for ADL */ @@ -1313,13 +1308,6 @@ static int intel_eth_pci_probe(struct pci_dev *pdev, memset(&res, 0, sizeof(res)); res.addr = pcim_iomap_table(pdev)[0]; - if (plat->eee_usecs_rate > 0) { - u32 tx_lpi_usec; - - tx_lpi_usec = (plat->eee_usecs_rate / 1000000) - 1; - writel(tx_lpi_usec, res.addr + GMAC_1US_TIC_COUNTER); - } - ret = stmmac_config_multi_msi(pdev, plat, &res); if (ret) { ret = stmmac_config_single_msi(pdev, plat, &res); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h index a12f8e65f89f..7511c224b312 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h @@ -21,7 +21,6 @@ #define SERDES_RATE_MASK GENMASK(9, 8) #define SERDES_PCLK_MASK GENMASK(14, 12) /* PCLK rate to PHY */ #define SERDES_LINK_MODE_MASK GENMASK(2, 1) -#define SERDES_LINK_MODE_SHIFT 1 #define SERDES_PWR_ST_SHIFT 4 #define SERDES_PWR_ST_P0 0x0 #define SERDES_PWR_ST_P3 0x3 diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c 
b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c index 1a93787056a7..e1591e6217d4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c @@ -66,12 +66,14 @@ DMA_STATUS_TPS | DMA_STATUS_TI | \ DMA_STATUS_MSK_COMMON_LOONGSON) -#define PCI_DEVICE_ID_LOONGSON_GMAC 0x7a03 +#define PCI_DEVICE_ID_LOONGSON_GMAC1 0x7a03 +#define PCI_DEVICE_ID_LOONGSON_GMAC2 0x7a23 #define PCI_DEVICE_ID_LOONGSON_GNET 0x7a13 -#define DWMAC_CORE_LS_MULTICHAN 0x10 /* Loongson custom ID */ -#define CHANNEL_NUM 8 +#define DWMAC_CORE_MULTICHAN_V1 0x10 /* Loongson custom ID 0x10 */ +#define DWMAC_CORE_MULTICHAN_V2 0x12 /* Loongson custom ID 0x12 */ struct loongson_data { + u32 multichan; u32 loongson_id; struct device *dev; }; @@ -83,6 +85,8 @@ struct stmmac_pci_info { static void loongson_default_data(struct pci_dev *pdev, struct plat_stmmacenet_data *plat) { + struct loongson_data *ld = plat->bsp_priv; + /* Get bus_id, this can be overwritten later */ plat->bus_id = pci_dev_id(pdev); @@ -116,31 +120,37 @@ static void loongson_default_data(struct pci_dev *pdev, plat->dma_cfg->pbl = 32; plat->dma_cfg->pblx8 = true; -} - -static int loongson_gmac_data(struct pci_dev *pdev, - struct plat_stmmacenet_data *plat) -{ - struct loongson_data *ld; - int i; - - ld = plat->bsp_priv; - loongson_default_data(pdev, plat); - - if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN) { - plat->rx_queues_to_use = CHANNEL_NUM; - plat->tx_queues_to_use = CHANNEL_NUM; + switch (ld->loongson_id) { + case DWMAC_CORE_MULTICHAN_V1: + ld->multichan = 1; + plat->rx_queues_to_use = 8; + plat->tx_queues_to_use = 8; /* Only channel 0 supports checksum, * so turn off checksum to enable multiple channels. */ - for (i = 1; i < CHANNEL_NUM; i++) + for (int i = 1; i < 8; i++) plat->tx_queues_cfg[i].coe_unsupported = 1; - } else { + + break; + case DWMAC_CORE_MULTICHAN_V2: + ld->multichan = 1; + plat->rx_queues_to_use = 4; + plat->tx_queues_to_use = 4; + break; + default: + ld->multichan = 0; plat->tx_queues_to_use = 1; plat->rx_queues_to_use = 1; + break; } +} + +static int loongson_gmac_data(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) +{ + loongson_default_data(pdev, plat); plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID; @@ -172,27 +182,8 @@ static void loongson_gnet_fix_speed(void *priv, int speed, unsigned int mode) static int loongson_gnet_data(struct pci_dev *pdev, struct plat_stmmacenet_data *plat) { - struct loongson_data *ld; - int i; - - ld = plat->bsp_priv; - loongson_default_data(pdev, plat); - if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN) { - plat->rx_queues_to_use = CHANNEL_NUM; - plat->tx_queues_to_use = CHANNEL_NUM; - - /* Only channel 0 supports checksum, - * so turn off checksum to enable multiple channels. - */ - for (i = 1; i < CHANNEL_NUM; i++) - plat->tx_queues_cfg[i].coe_unsupported = 1; - } else { - plat->tx_queues_to_use = 1; - plat->rx_queues_to_use = 1; - } - plat->phy_interface = PHY_INTERFACE_MODE_GMII; plat->mdio_bus_data->phy_mask = ~(u32)BIT(2); plat->fix_mac_speed = loongson_gnet_fix_speed; @@ -350,14 +341,14 @@ static struct mac_device_info *loongson_dwmac_setup(void *apriv) return NULL; /* The Loongson GMAC and GNET devices are based on the DW GMAC - * v3.50a and v3.73a IP-cores. But the HW designers have changed the - * GMAC_VERSION.SNPSVER field to the custom 0x10 value on the - * network controllers with the multi-channels feature + * v3.50a and v3.73a IP-cores. 
But the HW designers have changed + * the GMAC_VERSION.SNPSVER field to the custom 0x10/0x12 value + * on the network controllers with the multi-channels feature * available to emphasize the differences: multiple DMA-channels, * AV feature and GMAC_INT_STATUS CSR flags layout. Get back the * original value so the correct HW-interface would be selected. */ - if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN) { + if (ld->multichan) { priv->synopsys_id = DWMAC_CORE_3_70; *dma = dwmac1000_dma_ops; dma->init_chan = loongson_dwmac_dma_init_channel; @@ -378,13 +369,13 @@ static struct mac_device_info *loongson_dwmac_setup(void *apriv) if (mac->multicast_filter_bins) mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins); - /* Loongson GMAC doesn't support the flow control. LS2K2000 - * GNET doesn't support the half-duplex link mode. + /* Loongson GMAC doesn't support the flow control. Loongson GNET + * without multi-channel doesn't support the half-duplex link mode. */ - if (pdev->device == PCI_DEVICE_ID_LOONGSON_GMAC) { + if (pdev->device != PCI_DEVICE_ID_LOONGSON_GNET) { mac->link.caps = MAC_10 | MAC_100 | MAC_1000; } else { - if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN) + if (ld->multichan) mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000; else @@ -413,9 +404,11 @@ static int loongson_dwmac_msi_config(struct pci_dev *pdev, struct plat_stmmacenet_data *plat, struct stmmac_resources *res) { - int i, ret, vecs; + int i, ch_num, ret, vecs; + + ch_num = min(plat->tx_queues_to_use, plat->rx_queues_to_use); - vecs = roundup_pow_of_two(CHANNEL_NUM * 2 + 1); + vecs = roundup_pow_of_two(ch_num * 2 + 1); ret = pci_alloc_irq_vectors(pdev, vecs, vecs, PCI_IRQ_MSI); if (ret < 0) { dev_warn(&pdev->dev, "Failed to allocate MSI IRQs\n"); @@ -424,14 +417,12 @@ static int loongson_dwmac_msi_config(struct pci_dev *pdev, res->irq = pci_irq_vector(pdev, 0); - for (i = 0; i < plat->rx_queues_to_use; i++) { - res->rx_irq[CHANNEL_NUM - 1 - i] = - pci_irq_vector(pdev, 1 + i * 2); + for (i = 0; i < ch_num; i++) { + res->rx_irq[ch_num - 1 - i] = pci_irq_vector(pdev, 1 + i * 2); } - for (i = 0; i < plat->tx_queues_to_use; i++) { - res->tx_irq[CHANNEL_NUM - 1 - i] = - pci_irq_vector(pdev, 2 + i * 2); + for (i = 0; i < ch_num; i++) { + res->tx_irq[ch_num - 1 - i] = pci_irq_vector(pdev, 2 + i * 2); } plat->flags |= STMMAC_FLAG_MULTI_MSI_EN; @@ -593,7 +584,7 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id goto err_disable_device; /* Use the common MAC IRQ if per-channel MSIs allocation failed */ - if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN) + if (ld->multichan) loongson_dwmac_msi_config(pdev, plat, &res); ret = stmmac_dvr_probe(&pdev->dev, plat, &res); @@ -605,7 +596,7 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id err_plat_clear: if (dev_of_node(&pdev->dev)) loongson_dwmac_dt_clear(pdev, plat); - if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN) + if (ld->multichan) loongson_dwmac_msi_clear(pdev); err_disable_device: pci_disable_device(pdev); @@ -624,7 +615,7 @@ static void loongson_dwmac_remove(struct pci_dev *pdev) if (dev_of_node(&pdev->dev)) loongson_dwmac_dt_clear(pdev, priv->plat); - if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN) + if (ld->multichan) loongson_dwmac_msi_clear(pdev); pci_disable_device(pdev); @@ -669,7 +660,8 @@ static SIMPLE_DEV_PM_OPS(loongson_dwmac_pm_ops, loongson_dwmac_suspend, loongson_dwmac_resume); static const struct pci_device_id loongson_dwmac_id_table[] = { - { PCI_DEVICE_DATA(LOONGSON, 
GMAC, &loongson_gmac_pci_info) }, + { PCI_DEVICE_DATA(LOONGSON, GMAC1, &loongson_gmac_pci_info) }, + { PCI_DEVICE_DATA(LOONGSON, GMAC2, &loongson_gmac_pci_info) }, { PCI_DEVICE_DATA(LOONGSON, GNET, &loongson_gnet_pci_info) }, {} }; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c index d178d5ddc7c7..39421d6a34e4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c @@ -581,7 +581,6 @@ static int mediatek_dwmac_common_data(struct platform_device *pdev, int i; priv_plat->phy_mode = plat->phy_interface; - plat->mac_interface = priv_plat->phy_mode; if (priv_plat->mac_wol) plat->flags &= ~STMMAC_FLAG_USE_PHY_WOL; else diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c index 0e4da216f942..e30bdf72331a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c @@ -106,12 +106,11 @@ struct qcom_ethqos { struct platform_device *pdev; void __iomem *rgmii_base; void __iomem *mac_base; - int (*configure_func)(struct qcom_ethqos *ethqos); + int (*configure_func)(struct qcom_ethqos *ethqos, int speed); unsigned int link_clk_rate; struct clk *link_clk; struct phy *serdes_phy; - int speed; int serdes_speed; phy_interface_t phy_mode; @@ -385,7 +384,7 @@ static int ethqos_dll_configure(struct qcom_ethqos *ethqos) return 0; } -static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos) +static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos, int speed) { struct device *dev = ðqos->pdev->dev; int phase_shift; @@ -412,7 +411,7 @@ static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos) rgmii_updatel(ethqos, RGMII_CONFIG_INTF_SEL, 0, RGMII_IO_MACRO_CONFIG); - switch (ethqos->speed) { + switch (speed) { case SPEED_1000: rgmii_updatel(ethqos, RGMII_CONFIG_DDR_MODE, RGMII_CONFIG_DDR_MODE, RGMII_IO_MACRO_CONFIG); @@ -532,14 +531,14 @@ static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos) loopback, RGMII_IO_MACRO_CONFIG); break; default: - dev_err(dev, "Invalid speed %d\n", ethqos->speed); + dev_err(dev, "Invalid speed %d\n", speed); return -EINVAL; } return 0; } -static int ethqos_configure_rgmii(struct qcom_ethqos *ethqos) +static int ethqos_configure_rgmii(struct qcom_ethqos *ethqos, int speed) { struct device *dev = ðqos->pdev->dev; volatile unsigned int dll_lock; @@ -562,7 +561,7 @@ static int ethqos_configure_rgmii(struct qcom_ethqos *ethqos) SDCC_DLL_CONFIG_PDN, SDCC_HC_REG_DLL_CONFIG); if (ethqos->has_emac_ge_3) { - if (ethqos->speed == SPEED_1000) { + if (speed == SPEED_1000) { rgmii_writel(ethqos, 0x1800000, SDCC_TEST_CTL); rgmii_writel(ethqos, 0x2C010800, SDCC_USR_CTL); rgmii_writel(ethqos, 0xA001, SDCC_HC_REG_DLL_CONFIG2); @@ -580,7 +579,7 @@ static int ethqos_configure_rgmii(struct qcom_ethqos *ethqos) rgmii_updatel(ethqos, SDCC_DLL_CONFIG_PDN, 0, SDCC_HC_REG_DLL_CONFIG); - if (ethqos->speed != SPEED_100 && ethqos->speed != SPEED_10) { + if (speed != SPEED_100 && speed != SPEED_10) { /* Set DLL_EN */ rgmii_updatel(ethqos, SDCC_DLL_CONFIG_DLL_EN, SDCC_DLL_CONFIG_DLL_EN, SDCC_HC_REG_DLL_CONFIG); @@ -607,10 +606,10 @@ static int ethqos_configure_rgmii(struct qcom_ethqos *ethqos) dev_err(dev, "Timeout while waiting for DLL lock\n"); } - if (ethqos->speed == SPEED_1000) + if (speed == SPEED_1000) ethqos_dll_configure(ethqos); - ethqos_rgmii_macro_init(ethqos); + ethqos_rgmii_macro_init(ethqos, 
speed); return 0; } @@ -626,7 +625,7 @@ static void ethqos_set_serdes_speed(struct qcom_ethqos *ethqos, int speed) /* On interface toggle MAC registers gets reset. * Configure MAC block for SGMII on ethernet phy link up */ -static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos) +static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos, int speed) { struct net_device *dev = platform_get_drvdata(ethqos->pdev); struct stmmac_priv *priv = netdev_priv(dev); @@ -634,7 +633,7 @@ static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos) val = readl(ethqos->mac_base + MAC_CTRL_REG); - switch (ethqos->speed) { + switch (speed) { case SPEED_2500: val &= ~ETHQOS_MAC_CTRL_PORT_SEL; rgmii_updatel(ethqos, RGMII_CONFIG2_RGMII_CLK_SEL_CFG, @@ -673,17 +672,9 @@ static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos) return val; } -static void qcom_ethqos_speed_mode_2500(struct net_device *ndev, void *data) +static int ethqos_configure(struct qcom_ethqos *ethqos, int speed) { - struct stmmac_priv *priv = netdev_priv(ndev); - - priv->plat->max_speed = 2500; - priv->plat->phy_interface = PHY_INTERFACE_MODE_2500BASEX; -} - -static int ethqos_configure(struct qcom_ethqos *ethqos) -{ - return ethqos->configure_func(ethqos); + return ethqos->configure_func(ethqos, speed); } static void ethqos_fix_mac_speed(void *priv, int speed, unsigned int mode) @@ -691,9 +682,8 @@ static void ethqos_fix_mac_speed(void *priv, int speed, unsigned int mode) struct qcom_ethqos *ethqos = priv; qcom_ethqos_set_sgmii_loopback(ethqos, false); - ethqos->speed = speed; ethqos_update_link_clk(ethqos, speed); - ethqos_configure(ethqos); + ethqos_configure(ethqos, speed); } static int qcom_ethqos_serdes_powerup(struct net_device *ndev, void *priv) @@ -709,7 +699,7 @@ static int qcom_ethqos_serdes_powerup(struct net_device *ndev, void *priv) if (ret) return ret; - return phy_set_speed(ethqos->serdes_phy, ethqos->speed); + return phy_set_speed(ethqos->serdes_phy, ethqos->serdes_speed); } static void qcom_ethqos_serdes_powerdown(struct net_device *ndev, void *priv) @@ -803,8 +793,6 @@ static int qcom_ethqos_probe(struct platform_device *pdev) ethqos->configure_func = ethqos_configure_rgmii; break; case PHY_INTERFACE_MODE_2500BASEX: - plat_dat->speed_mode_2500 = qcom_ethqos_speed_mode_2500; - fallthrough; case PHY_INTERFACE_MODE_SGMII: ethqos->configure_func = ethqos_configure_sgmii; break; @@ -847,7 +835,6 @@ static int qcom_ethqos_probe(struct platform_device *pdev) return dev_err_probe(dev, PTR_ERR(ethqos->serdes_phy), "Failed to get serdes phy\n"); - ethqos->speed = SPEED_1000; ethqos->serdes_speed = SPEED_1000; ethqos_update_link_clk(ethqos, SPEED_1000); ethqos_set_func_clk_en(ethqos); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c new file mode 100644 index 000000000000..9a774046455b --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c @@ -0,0 +1,146 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * dwmac-renesas-gbeth.c - DWMAC Specific Glue layer for Renesas GBETH + * + * The Rx and Tx clocks are supplied as follows for the GBETH IP. 
+ * + * Rx / Tx + * -------+------------- on / off ------- + * | + * | Rx-180 / Tx-180 + * +---- not ---- on / off ------- + * + * Copyright (C) 2025 Renesas Electronics Corporation + */ + +#include <linux/clk.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/reset.h> + +#include "stmmac_platform.h" + +struct renesas_gbeth { + struct plat_stmmacenet_data *plat_dat; + struct reset_control *rstc; + struct device *dev; +}; + +static const char *const renesas_gbeth_clks[] = { + "tx", "tx-180", "rx", "rx-180", +}; + +static int renesas_gbeth_init(struct platform_device *pdev, void *priv) +{ + struct plat_stmmacenet_data *plat_dat; + struct renesas_gbeth *gbeth = priv; + int ret; + + plat_dat = gbeth->plat_dat; + + ret = reset_control_deassert(gbeth->rstc); + if (ret) { + dev_err(gbeth->dev, "Reset deassert failed\n"); + return ret; + } + + ret = clk_bulk_prepare_enable(plat_dat->num_clks, + plat_dat->clks); + if (ret) + reset_control_assert(gbeth->rstc); + + return ret; +} + +static void renesas_gbeth_exit(struct platform_device *pdev, void *priv) +{ + struct plat_stmmacenet_data *plat_dat; + struct renesas_gbeth *gbeth = priv; + int ret; + + plat_dat = gbeth->plat_dat; + + clk_bulk_disable_unprepare(plat_dat->num_clks, plat_dat->clks); + + ret = reset_control_assert(gbeth->rstc); + if (ret) + dev_err(gbeth->dev, "Reset assert failed\n"); +} + +static int renesas_gbeth_probe(struct platform_device *pdev) +{ + struct plat_stmmacenet_data *plat_dat; + struct stmmac_resources stmmac_res; + struct device *dev = &pdev->dev; + struct renesas_gbeth *gbeth; + unsigned int i; + int err; + + err = stmmac_get_platform_resources(pdev, &stmmac_res); + if (err) + return dev_err_probe(dev, err, + "failed to get resources\n"); + + plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac); + if (IS_ERR(plat_dat)) + return dev_err_probe(dev, PTR_ERR(plat_dat), + "dt configuration failed\n"); + + gbeth = devm_kzalloc(dev, sizeof(*gbeth), GFP_KERNEL); + if (!gbeth) + return -ENOMEM; + + plat_dat->num_clks = ARRAY_SIZE(renesas_gbeth_clks); + plat_dat->clks = devm_kcalloc(dev, plat_dat->num_clks, + sizeof(*plat_dat->clks), GFP_KERNEL); + if (!plat_dat->clks) + return -ENOMEM; + + for (i = 0; i < plat_dat->num_clks; i++) + plat_dat->clks[i].id = renesas_gbeth_clks[i]; + + err = devm_clk_bulk_get(dev, plat_dat->num_clks, plat_dat->clks); + if (err < 0) + return err; + + plat_dat->clk_tx_i = stmmac_pltfr_find_clk(plat_dat, "tx"); + if (!plat_dat->clk_tx_i) + return dev_err_probe(dev, -EINVAL, + "error finding tx clock\n"); + + gbeth->rstc = devm_reset_control_get_exclusive(dev, NULL); + if (IS_ERR(gbeth->rstc)) + return PTR_ERR(gbeth->rstc); + + gbeth->dev = dev; + gbeth->plat_dat = plat_dat; + plat_dat->bsp_priv = gbeth; + plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate; + plat_dat->init = renesas_gbeth_init; + plat_dat->exit = renesas_gbeth_exit; + plat_dat->flags |= STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY | + STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP | + STMMAC_FLAG_SPH_DISABLE; + + return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res); +} + +static const struct of_device_id renesas_gbeth_match[] = { + { .compatible = "renesas,rzv2h-gbeth", }, + { /* Sentinel */ } +}; +MODULE_DEVICE_TABLE(of, renesas_gbeth_match); + +static struct platform_driver renesas_gbeth_driver = { + .probe = renesas_gbeth_probe, + .driver = { + .name = "renesas-gbeth", + .of_match_table = renesas_gbeth_match, + }, +}; +module_platform_driver(renesas_gbeth_driver); + 
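[Annotation, not part of the patch] Several glue drivers touched in this series (anarion, ingenic, sti, socfpga, sun7i, and the new dwmac-renesas-gbeth above) end up on the same shape: SoC-specific setup moves into plat_dat->init/exit callbacks and probing is handed to devm_stmmac_pltfr_probe(), so stmmac_pltfr_pm_ops can redo the setup across suspend/resume and the driver-specific .remove and PM ops go away. A minimal sketch of that pattern, assuming a hypothetical "foo" glue driver with a single clock (illustrative only; the stmmac helper names and callback signatures are the ones used in the hunks above):

        /* Sketch of the plat_dat->init/exit + devm_stmmac_pltfr_probe() pattern.
         * "foo" names are hypothetical; of_match_table/MODULE_* omitted for brevity.
         */
        #include <linux/clk.h>
        #include <linux/err.h>
        #include <linux/module.h>
        #include <linux/platform_device.h>

        #include "stmmac_platform.h"

        struct foo_dwmac {
                struct clk *clk;
        };

        static int foo_dwmac_init(struct platform_device *pdev, void *bsp_priv)
        {
                struct foo_dwmac *dwmac = bsp_priv;

                /* Called at probe time and again on resume via stmmac_pltfr_pm_ops. */
                return clk_prepare_enable(dwmac->clk);
        }

        static void foo_dwmac_exit(struct platform_device *pdev, void *bsp_priv)
        {
                struct foo_dwmac *dwmac = bsp_priv;

                /* Called on remove, probe failure and suspend. */
                clk_disable_unprepare(dwmac->clk);
        }

        static int foo_dwmac_probe(struct platform_device *pdev)
        {
                struct plat_stmmacenet_data *plat_dat;
                struct stmmac_resources stmmac_res;
                struct foo_dwmac *dwmac;
                int ret;

                ret = stmmac_get_platform_resources(pdev, &stmmac_res);
                if (ret)
                        return ret;

                plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
                if (IS_ERR(plat_dat))
                        return PTR_ERR(plat_dat);

                dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
                if (!dwmac)
                        return -ENOMEM;

                dwmac->clk = devm_clk_get(&pdev->dev, NULL);
                if (IS_ERR(dwmac->clk))
                        return PTR_ERR(dwmac->clk);

                plat_dat->bsp_priv = dwmac;
                plat_dat->init = foo_dwmac_init;
                plat_dat->exit = foo_dwmac_exit;

                /* The devm variant unwinds init/exit itself; no .remove needed. */
                return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
        }

        static struct platform_driver foo_dwmac_driver = {
                .probe = foo_dwmac_probe,
                .driver = {
                        .name = "foo-dwmac",
                        .pm = &stmmac_pltfr_pm_ops,
                },
        };
        module_platform_driver(foo_dwmac_driver);
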
+MODULE_AUTHOR("Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>"); +MODULE_DESCRIPTION("Renesas GBETH DWMAC Specific Glue layer"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index 116855658559..72b50f6d72f4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -50,6 +50,7 @@ struct socfpga_dwmac { u32 reg_offset; u32 reg_shift; struct device *dev; + struct plat_stmmacenet_data *plat_dat; struct regmap *sys_mgr_base_addr; struct reset_control *stmmac_rst; struct reset_control *stmmac_ocp_rst; @@ -58,17 +59,15 @@ struct socfpga_dwmac { void __iomem *sgmii_adapter_base; bool f2h_ptp_ref_clk; const struct socfpga_dwmac_ops *ops; - struct mdio_device *pcs_mdiodev; }; -static void socfpga_dwmac_fix_mac_speed(void *priv, int speed, unsigned int mode) +static void socfpga_dwmac_fix_mac_speed(void *bsp_priv, int speed, + unsigned int mode) { - struct socfpga_dwmac *dwmac = (struct socfpga_dwmac *)priv; + struct socfpga_dwmac *dwmac = (struct socfpga_dwmac *)bsp_priv; + struct stmmac_priv *priv = netdev_priv(dev_get_drvdata(dwmac->dev)); void __iomem *splitter_base = dwmac->splitter_base; void __iomem *sgmii_adapter_base = dwmac->sgmii_adapter_base; - struct device *dev = dwmac->dev; - struct net_device *ndev = dev_get_drvdata(dev); - struct phy_device *phy_dev = ndev->phydev; u32 val; if (sgmii_adapter_base) @@ -95,7 +94,9 @@ static void socfpga_dwmac_fix_mac_speed(void *priv, int speed, unsigned int mode writel(val, splitter_base + EMAC_SPLITTER_CTRL_REG); } - if (phy_dev && sgmii_adapter_base) + if ((priv->plat->phy_interface == PHY_INTERFACE_MODE_SGMII || + priv->plat->phy_interface == PHY_INTERFACE_MODE_1000BASEX) && + sgmii_adapter_base) writew(SGMII_ADAPTER_ENABLE, sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG); } @@ -233,10 +234,7 @@ err_node_put: static int socfpga_get_plat_phymode(struct socfpga_dwmac *dwmac) { - struct net_device *ndev = dev_get_drvdata(dwmac->dev); - struct stmmac_priv *priv = netdev_priv(ndev); - - return priv->plat->mac_interface; + return dwmac->plat_dat->mac_interface; } static void socfpga_sgmii_config(struct socfpga_dwmac *dwmac, bool enable) @@ -258,6 +256,7 @@ static int socfpga_set_phy_mode_common(int phymode, u32 *val) case PHY_INTERFACE_MODE_MII: case PHY_INTERFACE_MODE_GMII: case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_1000BASEX: *val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII; break; case PHY_INTERFACE_MODE_RMII: @@ -435,6 +434,13 @@ static struct phylink_pcs *socfpga_dwmac_select_pcs(struct stmmac_priv *priv, return priv->hw->phylink_pcs; } +static int socfpga_dwmac_init(struct platform_device *pdev, void *bsp_priv) +{ + struct socfpga_dwmac *dwmac = bsp_priv; + + return dwmac->ops->set_phy_mode(dwmac); +} + static int socfpga_dwmac_probe(struct platform_device *pdev) { struct plat_stmmacenet_data *plat_dat; @@ -442,8 +448,6 @@ static int socfpga_dwmac_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; int ret; struct socfpga_dwmac *dwmac; - struct net_device *ndev; - struct stmmac_priv *stpriv; const struct socfpga_dwmac_ops *ops; ops = device_get_match_data(&pdev->dev); @@ -479,9 +483,17 @@ static int socfpga_dwmac_probe(struct platform_device *pdev) return ret; } + /* The socfpga driver needs to control the stmmac reset to set the phy + * mode. Create a copy of the core reset handle so it can be used by + * the driver later. 
+ */ + dwmac->stmmac_rst = plat_dat->stmmac_rst; dwmac->ops = ops; + dwmac->plat_dat = plat_dat; + plat_dat->bsp_priv = dwmac; plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed; + plat_dat->init = socfpga_dwmac_init; plat_dat->pcs_init = socfpga_dwmac_pcs_init; plat_dat->pcs_exit = socfpga_dwmac_pcs_exit; plat_dat->select_pcs = socfpga_dwmac_select_pcs; @@ -489,67 +501,9 @@ static int socfpga_dwmac_probe(struct platform_device *pdev) plat_dat->riwt_off = 1; - ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); - if (ret) - return ret; - - ndev = platform_get_drvdata(pdev); - stpriv = netdev_priv(ndev); - - /* The socfpga driver needs to control the stmmac reset to set the phy - * mode. Create a copy of the core reset handle so it can be used by - * the driver later. - */ - dwmac->stmmac_rst = stpriv->plat->stmmac_rst; - - ret = ops->set_phy_mode(dwmac); - if (ret) - goto err_dvr_remove; - - return 0; - -err_dvr_remove: - stmmac_dvr_remove(&pdev->dev); - - return ret; + return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res); } -#ifdef CONFIG_PM_SLEEP -static int socfpga_dwmac_resume(struct device *dev) -{ - struct net_device *ndev = dev_get_drvdata(dev); - struct stmmac_priv *priv = netdev_priv(ndev); - struct socfpga_dwmac *dwmac_priv = get_stmmac_bsp_priv(dev); - - dwmac_priv->ops->set_phy_mode(priv->plat->bsp_priv); - - return stmmac_resume(dev); -} -#endif /* CONFIG_PM_SLEEP */ - -static int __maybe_unused socfpga_dwmac_runtime_suspend(struct device *dev) -{ - struct net_device *ndev = dev_get_drvdata(dev); - struct stmmac_priv *priv = netdev_priv(ndev); - - stmmac_bus_clks_config(priv, false); - - return 0; -} - -static int __maybe_unused socfpga_dwmac_runtime_resume(struct device *dev) -{ - struct net_device *ndev = dev_get_drvdata(dev); - struct stmmac_priv *priv = netdev_priv(ndev); - - return stmmac_bus_clks_config(priv, true); -} - -static const struct dev_pm_ops socfpga_dwmac_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(stmmac_suspend, socfpga_dwmac_resume) - SET_RUNTIME_PM_OPS(socfpga_dwmac_runtime_suspend, socfpga_dwmac_runtime_resume, NULL) -}; - static const struct socfpga_dwmac_ops socfpga_gen5_ops = { .set_phy_mode = socfpga_gen5_set_phy_mode, }; @@ -567,10 +521,9 @@ MODULE_DEVICE_TABLE(of, socfpga_dwmac_match); static struct platform_driver socfpga_dwmac_driver = { .probe = socfpga_dwmac_probe, - .remove = stmmac_pltfr_remove, .driver = { .name = "socfpga-dwmac", - .pm = &socfpga_dwmac_pm_ops, + .pm = &stmmac_pltfr_pm_ops, .of_match_table = socfpga_dwmac_match, }, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c index be57c6c12c1c..53d5ce1f6dc6 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c @@ -23,12 +23,7 @@ #define DWMAC_50MHZ 50000000 -#define IS_PHY_IF_MODE_RGMII(iface) (iface == PHY_INTERFACE_MODE_RGMII || \ - iface == PHY_INTERFACE_MODE_RGMII_ID || \ - iface == PHY_INTERFACE_MODE_RGMII_RXID || \ - iface == PHY_INTERFACE_MODE_RGMII_TXID) - -#define IS_PHY_IF_MODE_GBIT(iface) (IS_PHY_IF_MODE_RGMII(iface) || \ +#define IS_PHY_IF_MODE_GBIT(iface) (phy_interface_mode_is_rgmii(iface) || \ iface == PHY_INTERFACE_MODE_GMII) /* STiH4xx register definitions (STiH407/STiH410 families) @@ -148,7 +143,7 @@ static void stih4xx_fix_retime_src(void *priv, int spd, unsigned int mode) src = TX_RETIME_SRC_CLKGEN; freq = DWMAC_50MHZ; } - } else if (IS_PHY_IF_MODE_RGMII(dwmac->interface)) { + } else if (phy_interface_mode_is_rgmii(dwmac->interface)) 
{ /* On GiGa clk source can be either ext or from clkgen */ freq = rgmii_clock(spd); @@ -238,6 +233,29 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac, return 0; } +static int sti_dwmac_init(struct platform_device *pdev, void *bsp_priv) +{ + struct sti_dwmac *dwmac = bsp_priv; + int ret; + + ret = clk_prepare_enable(dwmac->clk); + if (ret) + return ret; + + ret = sti_dwmac_set_mode(dwmac); + if (ret) + clk_disable_unprepare(dwmac->clk); + + return ret; +} + +static void sti_dwmac_exit(struct platform_device *pdev, void *bsp_priv) +{ + struct sti_dwmac *dwmac = bsp_priv; + + clk_disable_unprepare(dwmac->clk); +} + static int sti_dwmac_probe(struct platform_device *pdev) { struct plat_stmmacenet_data *plat_dat; @@ -274,59 +292,12 @@ static int sti_dwmac_probe(struct platform_device *pdev) plat_dat->bsp_priv = dwmac; plat_dat->fix_mac_speed = data->fix_retime_src; + plat_dat->init = sti_dwmac_init; + plat_dat->exit = sti_dwmac_exit; - ret = clk_prepare_enable(dwmac->clk); - if (ret) - return ret; - - ret = sti_dwmac_set_mode(dwmac); - if (ret) - goto disable_clk; - - ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); - if (ret) - goto disable_clk; - - return 0; - -disable_clk: - clk_disable_unprepare(dwmac->clk); - - return ret; -} - -static void sti_dwmac_remove(struct platform_device *pdev) -{ - struct sti_dwmac *dwmac = get_stmmac_bsp_priv(&pdev->dev); - - stmmac_dvr_remove(&pdev->dev); - - clk_disable_unprepare(dwmac->clk); + return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res); } -static int sti_dwmac_suspend(struct device *dev) -{ - struct sti_dwmac *dwmac = get_stmmac_bsp_priv(dev); - int ret = stmmac_suspend(dev); - - clk_disable_unprepare(dwmac->clk); - - return ret; -} - -static int sti_dwmac_resume(struct device *dev) -{ - struct sti_dwmac *dwmac = get_stmmac_bsp_priv(dev); - - clk_prepare_enable(dwmac->clk); - sti_dwmac_set_mode(dwmac); - - return stmmac_resume(dev); -} - -static DEFINE_SIMPLE_DEV_PM_OPS(sti_dwmac_pm_ops, sti_dwmac_suspend, - sti_dwmac_resume); - static const struct sti_dwmac_of_data stih4xx_dwmac_data = { .fix_retime_src = stih4xx_fix_retime_src, }; @@ -339,10 +310,9 @@ MODULE_DEVICE_TABLE(of, sti_dwmac_match); static struct platform_driver sti_dwmac_driver = { .probe = sti_dwmac_probe, - .remove = sti_dwmac_remove, .driver = { .name = "sti-dwmac", - .pm = pm_sleep_ptr(&sti_dwmac_pm_ops), + .pm = &stmmac_pltfr_pm_ops, .of_match_table = sti_dwmac_match, }, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c index c3d321192581..1eb16eec9c0d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c @@ -119,7 +119,7 @@ struct stm32_ops { u32 syscfg_clr_off; }; -static int stm32_dwmac_clk_enable(struct stm32_dwmac *dwmac, bool resume) +static int stm32_dwmac_clk_enable(struct stm32_dwmac *dwmac) { int ret; @@ -127,11 +127,9 @@ static int stm32_dwmac_clk_enable(struct stm32_dwmac *dwmac, bool resume) if (ret) goto err_clk_tx; - if (!dwmac->ops->clk_rx_enable_in_suspend || !resume) { - ret = clk_prepare_enable(dwmac->clk_rx); - if (ret) - goto err_clk_rx; - } + ret = clk_prepare_enable(dwmac->clk_rx); + if (ret) + goto err_clk_rx; ret = clk_prepare_enable(dwmac->syscfg_clk); if (ret) @@ -148,15 +146,14 @@ static int stm32_dwmac_clk_enable(struct stm32_dwmac *dwmac, bool resume) err_clk_eth_ck: clk_disable_unprepare(dwmac->syscfg_clk); err_syscfg_clk: - if (!dwmac->ops->clk_rx_enable_in_suspend || !resume) - 
clk_disable_unprepare(dwmac->clk_rx); + clk_disable_unprepare(dwmac->clk_rx); err_clk_rx: clk_disable_unprepare(dwmac->clk_tx); err_clk_tx: return ret; } -static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat, bool resume) +static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat) { struct stm32_dwmac *dwmac = plat_dat->bsp_priv; int ret; @@ -167,7 +164,7 @@ static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat, bool resume) return ret; } - return stm32_dwmac_clk_enable(dwmac, resume); + return stm32_dwmac_clk_enable(dwmac); } static int stm32mp1_select_ethck_external(struct plat_stmmacenet_data *plat_dat) @@ -382,12 +379,10 @@ static int stm32mcu_set_mode(struct plat_stmmacenet_data *plat_dat) SYSCFG_MCU_ETH_MASK, val << 23); } -static void stm32_dwmac_clk_disable(struct stm32_dwmac *dwmac, bool suspend) +static void stm32_dwmac_clk_disable(struct stm32_dwmac *dwmac) { clk_disable_unprepare(dwmac->clk_tx); - if (!dwmac->ops->clk_rx_enable_in_suspend || !suspend) - clk_disable_unprepare(dwmac->clk_rx); - + clk_disable_unprepare(dwmac->clk_rx); clk_disable_unprepare(dwmac->syscfg_clk); if (dwmac->enable_eth_ck) clk_disable_unprepare(dwmac->clk_eth_ck); @@ -541,18 +536,32 @@ static int stm32_dwmac_probe(struct platform_device *pdev) plat_dat->flags |= STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP; plat_dat->bsp_priv = dwmac; - ret = stm32_dwmac_init(plat_dat, false); + ret = stm32_dwmac_init(plat_dat); if (ret) return ret; + /* If this platform requires the clock to be running in suspend, + * prepare and enable the receive clock an additional time to keep + * it running. + */ + if (dwmac->ops->clk_rx_enable_in_suspend) { + ret = clk_prepare_enable(dwmac->clk_rx); + if (ret) + goto err_clk_disable; + } + ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); if (ret) - goto err_clk_disable; + goto err_clk_disable_suspend; return 0; +err_clk_disable_suspend: + if (dwmac->ops->clk_rx_enable_in_suspend) + clk_disable_unprepare(dwmac->clk_rx); + err_clk_disable: - stm32_dwmac_clk_disable(dwmac, false); + stm32_dwmac_clk_disable(dwmac); return ret; } @@ -565,7 +574,15 @@ static void stm32_dwmac_remove(struct platform_device *pdev) stmmac_dvr_remove(&pdev->dev); - stm32_dwmac_clk_disable(dwmac, false); + /* If this platform requires the clock to be running in suspend, + * we need to disable and unprepare the receive clock an additional + * time to balance the extra clk_prepare_enable() in the probe + * function. 
+ */ + if (dwmac->ops->clk_rx_enable_in_suspend) + clk_disable_unprepare(dwmac->clk_rx); + + stm32_dwmac_clk_disable(dwmac); if (dwmac->irq_pwr_wakeup >= 0) { dev_pm_clear_wake_irq(&pdev->dev); @@ -596,7 +613,7 @@ static int stm32_dwmac_suspend(struct device *dev) if (ret) return ret; - stm32_dwmac_clk_disable(dwmac, true); + stm32_dwmac_clk_disable(dwmac); if (dwmac->ops->suspend) ret = dwmac->ops->suspend(dwmac); @@ -614,7 +631,7 @@ static int stm32_dwmac_resume(struct device *dev) if (dwmac->ops->resume) dwmac->ops->resume(dwmac); - ret = stm32_dwmac_init(priv->plat, true); + ret = stm32_dwmac_init(priv->plat); if (ret) return ret; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index 6c7e8655a7eb..2796dc426943 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c @@ -1239,14 +1239,10 @@ static int sun8i_dwmac_probe(struct platform_device *pdev) if (ret) return ret; - ret = sun8i_dwmac_init(pdev, plat_dat->bsp_priv); + ret = stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res); if (ret) goto dwmac_syscon; - ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); - if (ret) - goto dwmac_exit; - ndev = dev_get_drvdata(&pdev->dev); priv = netdev_priv(ndev); @@ -1283,9 +1279,7 @@ dwmac_mux: clk_put(gmac->ephy_clk); dwmac_remove: pm_runtime_put_noidle(&pdev->dev); - stmmac_dvr_remove(&pdev->dev); -dwmac_exit: - sun8i_dwmac_exit(pdev, gmac); + stmmac_pltfr_remove(pdev); dwmac_syscon: sun8i_dwmac_unset_syscon(gmac); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c index 9f098ff0ff05..1eadcf5d1ad6 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c @@ -72,28 +72,28 @@ static void sun7i_gmac_exit(struct platform_device *pdev, void *priv) regulator_disable(gmac->regulator); } -static void sun7i_fix_speed(void *priv, int speed, unsigned int mode) +static int sun7i_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i, + phy_interface_t interface, int speed) { - struct sunxi_priv_data *gmac = priv; - - /* only GMII mode requires us to reconfigure the clock lines */ - if (gmac->interface != PHY_INTERFACE_MODE_GMII) - return; - - if (gmac->clk_enabled) { - clk_disable(gmac->tx_clk); - gmac->clk_enabled = 0; - } - clk_unprepare(gmac->tx_clk); - - if (speed == 1000) { - clk_set_rate(gmac->tx_clk, SUN7I_GMAC_GMII_RGMII_RATE); - clk_prepare_enable(gmac->tx_clk); - gmac->clk_enabled = 1; - } else { - clk_set_rate(gmac->tx_clk, SUN7I_GMAC_MII_RATE); - clk_prepare(gmac->tx_clk); + struct sunxi_priv_data *gmac = bsp_priv; + + if (interface == PHY_INTERFACE_MODE_GMII) { + if (gmac->clk_enabled) { + clk_disable(gmac->tx_clk); + gmac->clk_enabled = 0; + } + clk_unprepare(gmac->tx_clk); + + if (speed == 1000) { + clk_set_rate(gmac->tx_clk, SUN7I_GMAC_GMII_RGMII_RATE); + clk_prepare_enable(gmac->tx_clk); + gmac->clk_enabled = 1; + } else { + clk_set_rate(gmac->tx_clk, SUN7I_GMAC_MII_RATE); + clk_prepare(gmac->tx_clk); + } } + return 0; } static int sun7i_gmac_probe(struct platform_device *pdev) @@ -140,24 +140,11 @@ static int sun7i_gmac_probe(struct platform_device *pdev) plat_dat->bsp_priv = gmac; plat_dat->init = sun7i_gmac_init; plat_dat->exit = sun7i_gmac_exit; - plat_dat->fix_mac_speed = sun7i_fix_speed; + plat_dat->set_clk_tx_rate = sun7i_set_clk_tx_rate; plat_dat->tx_fifo_size = 4096; plat_dat->rx_fifo_size = 16384; - ret = sun7i_gmac_init(pdev, 
plat_dat->bsp_priv); - if (ret) - return ret; - - ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); - if (ret) - goto err_gmac_exit; - - return 0; - -err_gmac_exit: - sun7i_gmac_exit(pdev, plat_dat->bsp_priv); - - return ret; + return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res); } static const struct of_device_id sun7i_dwmac_match[] = { @@ -168,7 +155,6 @@ MODULE_DEVICE_TABLE(of, sun7i_dwmac_match); static struct platform_driver sun7i_dwmac_driver = { .probe = sun7i_gmac_probe, - .remove = stmmac_pltfr_remove, .driver = { .name = "sun7i-dwmac", .pm = &stmmac_pltfr_pm_ops, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c index 33cf99797df5..5e6ac82a89b9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c @@ -51,21 +51,14 @@ struct visconti_eth { u32 phy_intf_sel; struct clk *phy_ref_clk; struct device *dev; - spinlock_t lock; /* lock to protect register update */ }; -static void visconti_eth_fix_mac_speed(void *priv, int speed, unsigned int mode) +static int visconti_eth_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i, + phy_interface_t interface, int speed) { - struct visconti_eth *dwmac = priv; + struct visconti_eth *dwmac = bsp_priv; struct net_device *netdev = dev_get_drvdata(dwmac->dev); unsigned int val, clk_sel_val = 0; - unsigned long flags; - - spin_lock_irqsave(&dwmac->lock, flags); - - /* adjust link */ - val = readl(dwmac->reg + MAC_CTRL_REG); - val &= ~(GMAC_CONFIG_PS | GMAC_CONFIG_FES); switch (speed) { case SPEED_1000: @@ -77,24 +70,19 @@ static void visconti_eth_fix_mac_speed(void *priv, int speed, unsigned int mode) clk_sel_val = ETHER_CLK_SEL_FREQ_SEL_25M; if (dwmac->phy_intf_sel == ETHER_CONFIG_INTF_RMII) clk_sel_val = ETHER_CLK_SEL_DIV_SEL_2; - val |= GMAC_CONFIG_PS | GMAC_CONFIG_FES; break; case SPEED_10: if (dwmac->phy_intf_sel == ETHER_CONFIG_INTF_RGMII) clk_sel_val = ETHER_CLK_SEL_FREQ_SEL_2P5M; if (dwmac->phy_intf_sel == ETHER_CONFIG_INTF_RMII) clk_sel_val = ETHER_CLK_SEL_DIV_SEL_20; - val |= GMAC_CONFIG_PS; break; default: /* No bit control */ netdev_err(netdev, "Unsupported speed request (%d)", speed); - spin_unlock_irqrestore(&dwmac->lock, flags); - return; + return -EINVAL; } - writel(val, dwmac->reg + MAC_CTRL_REG); - /* Stop internal clock */ val = readl(dwmac->reg + REG_ETHER_CLOCK_SEL); val &= ~(ETHER_CLK_SEL_RMII_CLK_EN | ETHER_CLK_SEL_RX_TX_CLK_EN); @@ -136,7 +124,7 @@ static void visconti_eth_fix_mac_speed(void *priv, int speed, unsigned int mode) break; } - spin_unlock_irqrestore(&dwmac->lock, flags); + return 0; } static int visconti_eth_init_hw(struct platform_device *pdev, struct plat_stmmacenet_data *plat_dat) @@ -228,11 +216,10 @@ static int visconti_eth_dwmac_probe(struct platform_device *pdev) if (!dwmac) return -ENOMEM; - spin_lock_init(&dwmac->lock); dwmac->reg = stmmac_res.addr; dwmac->dev = &pdev->dev; plat_dat->bsp_priv = dwmac; - plat_dat->fix_mac_speed = visconti_eth_fix_mac_speed; + plat_dat->set_clk_tx_rate = visconti_eth_set_clk_tx_rate; ret = visconti_eth_clock_probe(pdev, plat_dat); if (ret) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h index 42fe29a4e300..f4694fd576f5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h @@ -17,11 +17,7 @@ #define GMAC_EXT_CONFIG 0x00000004 #define GMAC_PACKET_FILTER 0x00000008 #define GMAC_HASH_TAB(x) (0x10 + (x) * 4) 
-#define GMAC_VLAN_TAG 0x00000050 -#define GMAC_VLAN_TAG_DATA 0x00000054 -#define GMAC_VLAN_HASH_TABLE 0x00000058 #define GMAC_RX_FLOW_CTRL 0x00000090 -#define GMAC_VLAN_INCL 0x00000060 #define GMAC_QX_TX_FLOW_CTRL(x) (0x70 + x * 4) #define GMAC_TXQ_PRTY_MAP0 0x98 #define GMAC_TXQ_PRTY_MAP1 0x9C @@ -31,7 +27,6 @@ #define GMAC_RXQ_CTRL3 0x000000ac #define GMAC_INT_STATUS 0x000000b0 #define GMAC_INT_EN 0x000000b4 -#define GMAC_1US_TIC_COUNTER 0x000000dc #define GMAC_PCS_BASE 0x000000e0 #define GMAC_PHYIF_CONTROL_STATUS 0x000000f8 #define GMAC_PMT 0x000000c0 @@ -82,42 +77,6 @@ #define GMAC_MAX_PERFECT_ADDRESSES 128 -/* MAC VLAN */ -#define GMAC_VLAN_EDVLP BIT(26) -#define GMAC_VLAN_VTHM BIT(25) -#define GMAC_VLAN_DOVLTC BIT(20) -#define GMAC_VLAN_ESVL BIT(18) -#define GMAC_VLAN_ETV BIT(16) -#define GMAC_VLAN_VID GENMASK(15, 0) -#define GMAC_VLAN_VLTI BIT(20) -#define GMAC_VLAN_CSVL BIT(19) -#define GMAC_VLAN_VLC GENMASK(17, 16) -#define GMAC_VLAN_VLC_SHIFT 16 -#define GMAC_VLAN_VLHT GENMASK(15, 0) - -/* MAC VLAN Tag */ -#define GMAC_VLAN_TAG_VID GENMASK(15, 0) -#define GMAC_VLAN_TAG_ETV BIT(16) - -/* MAC VLAN Tag Control */ -#define GMAC_VLAN_TAG_CTRL_OB BIT(0) -#define GMAC_VLAN_TAG_CTRL_CT BIT(1) -#define GMAC_VLAN_TAG_CTRL_OFS_MASK GENMASK(6, 2) -#define GMAC_VLAN_TAG_CTRL_OFS_SHIFT 2 -#define GMAC_VLAN_TAG_CTRL_EVLS_MASK GENMASK(22, 21) -#define GMAC_VLAN_TAG_CTRL_EVLS_SHIFT 21 -#define GMAC_VLAN_TAG_CTRL_EVLRXS BIT(24) - -#define GMAC_VLAN_TAG_STRIP_NONE (0x0 << GMAC_VLAN_TAG_CTRL_EVLS_SHIFT) -#define GMAC_VLAN_TAG_STRIP_PASS (0x1 << GMAC_VLAN_TAG_CTRL_EVLS_SHIFT) -#define GMAC_VLAN_TAG_STRIP_FAIL (0x2 << GMAC_VLAN_TAG_CTRL_EVLS_SHIFT) -#define GMAC_VLAN_TAG_STRIP_ALL (0x3 << GMAC_VLAN_TAG_CTRL_EVLS_SHIFT) - -/* MAC VLAN Tag Data/Filter */ -#define GMAC_VLAN_TAG_DATA_VID GENMASK(15, 0) -#define GMAC_VLAN_TAG_DATA_VEN BIT(16) -#define GMAC_VLAN_TAG_DATA_ETV BIT(17) - /* MAC RX Queue Enable */ #define GMAC_RX_QUEUE_CLEAR(queue) ~(GENMASK(1, 0) << ((queue) * 2)) #define GMAC_RX_AV_QUEUE_ENABLE(queue) BIT((queue) * 2) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index cc4ddf608652..9c2549d4100f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -18,6 +18,7 @@ #include "stmmac.h" #include "stmmac_fpe.h" #include "stmmac_pcs.h" +#include "stmmac_vlan.h" #include "dwmac4.h" #include "dwmac5.h" @@ -448,165 +449,6 @@ static void dwmac4_set_eee_timer(struct mac_device_info *hw, int ls, int tw) writel(value, ioaddr + GMAC4_LPI_TIMER_CTRL); } -static void dwmac4_write_single_vlan(struct net_device *dev, u16 vid) -{ - void __iomem *ioaddr = (void __iomem *)dev->base_addr; - u32 val; - - val = readl(ioaddr + GMAC_VLAN_TAG); - val &= ~GMAC_VLAN_TAG_VID; - val |= GMAC_VLAN_TAG_ETV | vid; - - writel(val, ioaddr + GMAC_VLAN_TAG); -} - -static int dwmac4_write_vlan_filter(struct net_device *dev, - struct mac_device_info *hw, - u8 index, u32 data) -{ - void __iomem *ioaddr = (void __iomem *)dev->base_addr; - int ret; - u32 val; - - if (index >= hw->num_vlan) - return -EINVAL; - - writel(data, ioaddr + GMAC_VLAN_TAG_DATA); - - val = readl(ioaddr + GMAC_VLAN_TAG); - val &= ~(GMAC_VLAN_TAG_CTRL_OFS_MASK | - GMAC_VLAN_TAG_CTRL_CT | - GMAC_VLAN_TAG_CTRL_OB); - val |= (index << GMAC_VLAN_TAG_CTRL_OFS_SHIFT) | GMAC_VLAN_TAG_CTRL_OB; - - writel(val, ioaddr + GMAC_VLAN_TAG); - - ret = readl_poll_timeout(ioaddr + GMAC_VLAN_TAG, val, - !(val & GMAC_VLAN_TAG_CTRL_OB), - 1000, 
500000); - if (ret) { - netdev_err(dev, "Timeout accessing MAC_VLAN_Tag_Filter\n"); - return -EBUSY; - } - - return 0; -} - -static int dwmac4_add_hw_vlan_rx_fltr(struct net_device *dev, - struct mac_device_info *hw, - __be16 proto, u16 vid) -{ - int index = -1; - u32 val = 0; - int i, ret; - - if (vid > 4095) - return -EINVAL; - - /* Single Rx VLAN Filter */ - if (hw->num_vlan == 1) { - /* For single VLAN filter, VID 0 means VLAN promiscuous */ - if (vid == 0) { - netdev_warn(dev, "Adding VLAN ID 0 is not supported\n"); - return -EPERM; - } - - if (hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) { - netdev_err(dev, "Only single VLAN ID supported\n"); - return -EPERM; - } - - hw->vlan_filter[0] = vid; - dwmac4_write_single_vlan(dev, vid); - - return 0; - } - - /* Extended Rx VLAN Filter Enable */ - val |= GMAC_VLAN_TAG_DATA_ETV | GMAC_VLAN_TAG_DATA_VEN | vid; - - for (i = 0; i < hw->num_vlan; i++) { - if (hw->vlan_filter[i] == val) - return 0; - else if (!(hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN)) - index = i; - } - - if (index == -1) { - netdev_err(dev, "MAC_VLAN_Tag_Filter full (size: %0u)\n", - hw->num_vlan); - return -EPERM; - } - - ret = dwmac4_write_vlan_filter(dev, hw, index, val); - - if (!ret) - hw->vlan_filter[index] = val; - - return ret; -} - -static int dwmac4_del_hw_vlan_rx_fltr(struct net_device *dev, - struct mac_device_info *hw, - __be16 proto, u16 vid) -{ - int i, ret = 0; - - /* Single Rx VLAN Filter */ - if (hw->num_vlan == 1) { - if ((hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) == vid) { - hw->vlan_filter[0] = 0; - dwmac4_write_single_vlan(dev, 0); - } - return 0; - } - - /* Extended Rx VLAN Filter Enable */ - for (i = 0; i < hw->num_vlan; i++) { - if ((hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VID) == vid) { - ret = dwmac4_write_vlan_filter(dev, hw, i, 0); - - if (!ret) - hw->vlan_filter[i] = 0; - else - return ret; - } - } - - return ret; -} - -static void dwmac4_restore_hw_vlan_rx_fltr(struct net_device *dev, - struct mac_device_info *hw) -{ - void __iomem *ioaddr = hw->pcsr; - u32 value; - u32 hash; - u32 val; - int i; - - /* Single Rx VLAN Filter */ - if (hw->num_vlan == 1) { - dwmac4_write_single_vlan(dev, hw->vlan_filter[0]); - return; - } - - /* Extended Rx VLAN Filter Enable */ - for (i = 0; i < hw->num_vlan; i++) { - if (hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN) { - val = hw->vlan_filter[i]; - dwmac4_write_vlan_filter(dev, hw, i, val); - } - } - - hash = readl(ioaddr + GMAC_VLAN_HASH_TABLE); - if (hash & GMAC_VLAN_VLHT) { - value = readl(ioaddr + GMAC_VLAN_TAG); - value |= GMAC_VLAN_VTHM; - writel(value, ioaddr + GMAC_VLAN_TAG); - } -} - static void dwmac4_set_filter(struct mac_device_info *hw, struct net_device *dev) { @@ -965,45 +807,6 @@ static void dwmac4_set_mac_loopback(void __iomem *ioaddr, bool enable) writel(value, ioaddr + GMAC_CONFIG); } -static void dwmac4_update_vlan_hash(struct mac_device_info *hw, u32 hash, - u16 perfect_match, bool is_double) -{ - void __iomem *ioaddr = hw->pcsr; - u32 value; - - writel(hash, ioaddr + GMAC_VLAN_HASH_TABLE); - - value = readl(ioaddr + GMAC_VLAN_TAG); - - if (hash) { - value |= GMAC_VLAN_VTHM | GMAC_VLAN_ETV; - if (is_double) { - value |= GMAC_VLAN_EDVLP; - value |= GMAC_VLAN_ESVL; - value |= GMAC_VLAN_DOVLTC; - } - - writel(value, ioaddr + GMAC_VLAN_TAG); - } else if (perfect_match) { - u32 value = GMAC_VLAN_ETV; - - if (is_double) { - value |= GMAC_VLAN_EDVLP; - value |= GMAC_VLAN_ESVL; - value |= GMAC_VLAN_DOVLTC; - } - - writel(value | perfect_match, ioaddr + GMAC_VLAN_TAG); - } else { - value &= ~(GMAC_VLAN_VTHM | 
GMAC_VLAN_ETV); - value &= ~(GMAC_VLAN_EDVLP | GMAC_VLAN_ESVL); - value &= ~GMAC_VLAN_DOVLTC; - value &= ~GMAC_VLAN_VID; - - writel(value, ioaddr + GMAC_VLAN_TAG); - } -} - static void dwmac4_sarc_configure(void __iomem *ioaddr, int val) { u32 value = readl(ioaddr + GMAC_CONFIG); @@ -1014,19 +817,6 @@ static void dwmac4_sarc_configure(void __iomem *ioaddr, int val) writel(value, ioaddr + GMAC_CONFIG); } -static void dwmac4_enable_vlan(struct mac_device_info *hw, u32 type) -{ - void __iomem *ioaddr = hw->pcsr; - u32 value; - - value = readl(ioaddr + GMAC_VLAN_INCL); - value |= GMAC_VLAN_VLTI; - value |= GMAC_VLAN_CSVL; /* Only use SVLAN */ - value &= ~GMAC_VLAN_VLC; - value |= (type << GMAC_VLAN_VLC_SHIFT) & GMAC_VLAN_VLC; - writel(value, ioaddr + GMAC_VLAN_INCL); -} - static void dwmac4_set_arp_offload(struct mac_device_info *hw, bool en, u32 addr) { @@ -1143,35 +933,6 @@ static int dwmac4_config_l4_filter(struct mac_device_info *hw, u32 filter_no, return 0; } -static void dwmac4_rx_hw_vlan(struct mac_device_info *hw, - struct dma_desc *rx_desc, struct sk_buff *skb) -{ - if (hw->desc->get_rx_vlan_valid(rx_desc)) { - u16 vid = hw->desc->get_rx_vlan_tci(rx_desc); - - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); - } -} - -static void dwmac4_set_hw_vlan_mode(struct mac_device_info *hw) -{ - void __iomem *ioaddr = hw->pcsr; - u32 value = readl(ioaddr + GMAC_VLAN_TAG); - - value &= ~GMAC_VLAN_TAG_CTRL_EVLS_MASK; - - if (hw->hw_vlan_en) - /* Always strip VLAN on Receive */ - value |= GMAC_VLAN_TAG_STRIP_ALL; - else - /* Do not strip VLAN on Receive */ - value |= GMAC_VLAN_TAG_STRIP_NONE; - - /* Enable outer VLAN Tag in Rx DMA descriptor */ - value |= GMAC_VLAN_TAG_CTRL_EVLRXS; - writel(value, ioaddr + GMAC_VLAN_TAG); -} - const struct stmmac_ops dwmac4_ops = { .core_init = dwmac4_core_init, .update_caps = dwmac4_update_caps, @@ -1201,17 +962,10 @@ const struct stmmac_ops dwmac4_ops = { .debug = dwmac4_debug, .set_filter = dwmac4_set_filter, .set_mac_loopback = dwmac4_set_mac_loopback, - .update_vlan_hash = dwmac4_update_vlan_hash, .sarc_configure = dwmac4_sarc_configure, - .enable_vlan = dwmac4_enable_vlan, .set_arp_offload = dwmac4_set_arp_offload, .config_l3_filter = dwmac4_config_l3_filter, .config_l4_filter = dwmac4_config_l4_filter, - .add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr, - .del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr, - .restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr, - .rx_hw_vlan = dwmac4_rx_hw_vlan, - .set_hw_vlan_mode = dwmac4_set_hw_vlan_mode, }; const struct stmmac_ops dwmac410_ops = { @@ -1244,18 +998,11 @@ const struct stmmac_ops dwmac410_ops = { .set_filter = dwmac4_set_filter, .flex_pps_config = dwmac5_flex_pps_config, .set_mac_loopback = dwmac4_set_mac_loopback, - .update_vlan_hash = dwmac4_update_vlan_hash, .sarc_configure = dwmac4_sarc_configure, - .enable_vlan = dwmac4_enable_vlan, .set_arp_offload = dwmac4_set_arp_offload, .config_l3_filter = dwmac4_config_l3_filter, .config_l4_filter = dwmac4_config_l4_filter, .fpe_map_preemption_class = dwmac5_fpe_map_preemption_class, - .add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr, - .del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr, - .restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr, - .rx_hw_vlan = dwmac4_rx_hw_vlan, - .set_hw_vlan_mode = dwmac4_set_hw_vlan_mode, }; const struct stmmac_ops dwmac510_ops = { @@ -1292,51 +1039,13 @@ const struct stmmac_ops dwmac510_ops = { .rxp_config = dwmac5_rxp_config, .flex_pps_config = dwmac5_flex_pps_config, .set_mac_loopback = 
dwmac4_set_mac_loopback, - .update_vlan_hash = dwmac4_update_vlan_hash, .sarc_configure = dwmac4_sarc_configure, - .enable_vlan = dwmac4_enable_vlan, .set_arp_offload = dwmac4_set_arp_offload, .config_l3_filter = dwmac4_config_l3_filter, .config_l4_filter = dwmac4_config_l4_filter, .fpe_map_preemption_class = dwmac5_fpe_map_preemption_class, - .add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr, - .del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr, - .restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr, - .rx_hw_vlan = dwmac4_rx_hw_vlan, - .set_hw_vlan_mode = dwmac4_set_hw_vlan_mode, }; -static u32 dwmac4_get_num_vlan(void __iomem *ioaddr) -{ - u32 val, num_vlan; - - val = readl(ioaddr + GMAC_HW_FEATURE3); - switch (val & GMAC_HW_FEAT_NRVF) { - case 0: - num_vlan = 1; - break; - case 1: - num_vlan = 4; - break; - case 2: - num_vlan = 8; - break; - case 3: - num_vlan = 16; - break; - case 4: - num_vlan = 24; - break; - case 5: - num_vlan = 32; - break; - default: - num_vlan = 1; - } - - return num_vlan; -} - int dwmac4_setup(struct stmmac_priv *priv) { struct mac_device_info *mac = priv->hw; @@ -1368,7 +1077,7 @@ int dwmac4_setup(struct stmmac_priv *priv) mac->mii.reg_mask = GENMASK(20, 16); mac->mii.clk_csr_shift = 8; mac->mii.clk_csr_mask = GENMASK(11, 8); - mac->num_vlan = dwmac4_get_num_vlan(priv->ioaddr); + mac->num_vlan = stmmac_get_num_vlan(priv->ioaddr); return 0; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h index a03f5d771566..0d408ee17f33 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h @@ -57,19 +57,6 @@ #define XGMAC_FILTER_PR BIT(0) #define XGMAC_HASH_TABLE(x) (0x00000010 + (x) * 4) #define XGMAC_MAX_HASH_TABLE 8 -#define XGMAC_VLAN_TAG 0x00000050 -#define XGMAC_VLAN_EDVLP BIT(26) -#define XGMAC_VLAN_VTHM BIT(25) -#define XGMAC_VLAN_DOVLTC BIT(20) -#define XGMAC_VLAN_ESVL BIT(18) -#define XGMAC_VLAN_ETV BIT(16) -#define XGMAC_VLAN_VID GENMASK(15, 0) -#define XGMAC_VLAN_HASH_TABLE 0x00000058 -#define XGMAC_VLAN_INCL 0x00000060 -#define XGMAC_VLAN_VLTI BIT(20) -#define XGMAC_VLAN_CSVL BIT(19) -#define XGMAC_VLAN_VLC GENMASK(17, 16) -#define XGMAC_VLAN_VLC_SHIFT 16 #define XGMAC_RXQ_CTRL0 0x000000a0 #define XGMAC_RXQEN(x) GENMASK((x) * 2 + 1, (x) * 2) #define XGMAC_RXQEN_SHIFT(x) ((x) * 2) @@ -477,6 +464,7 @@ #define XGMAC_RDES3_RSV BIT(26) #define XGMAC_RDES3_L34T GENMASK(23, 20) #define XGMAC_RDES3_L34T_SHIFT 20 +#define XGMAC_RDES3_ET_LT GENMASK(19, 16) #define XGMAC_L34T_IP4TCP 0x1 #define XGMAC_L34T_IP4UDP 0x2 #define XGMAC_L34T_IP6TCP 0x9 @@ -486,6 +474,17 @@ #define XGMAC_RDES3_TSD BIT(6) #define XGMAC_RDES3_TSA BIT(4) +/* RDES0 (write back format) */ +#define XGMAC_RDES0_VLAN_TAG_MASK GENMASK(15, 0) + +/* Error Type or L2 Type(ET/LT) Field Number */ +#define XGMAC_ET_LT_VLAN_STAG 8 +#define XGMAC_ET_LT_VLAN_CTAG 9 +#define XGMAC_ET_LT_DVLAN_CTAG_CTAG 10 +#define XGMAC_ET_LT_DVLAN_STAG_STAG 11 +#define XGMAC_ET_LT_DVLAN_CTAG_STAG 12 +#define XGMAC_ET_LT_DVLAN_STAG_CTAG 13 + extern const struct stmmac_ops dwxgmac210_ops; extern const struct stmmac_ops dwxlgmac2_ops; extern const struct stmmac_dma_ops dwxgmac210_dma_ops; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c index a6d395c6bacd..6cadf8de4fdf 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c @@ -10,6 +10,7 @@ #include "stmmac.h" #include 
"stmmac_fpe.h" #include "stmmac_ptp.h" +#include "stmmac_vlan.h" #include "dwxlgmac2.h" #include "dwxgmac2.h" @@ -614,76 +615,6 @@ static int dwxgmac2_rss_configure(struct mac_device_info *hw, return 0; } -static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash, - u16 perfect_match, bool is_double) -{ - void __iomem *ioaddr = hw->pcsr; - - writel(hash, ioaddr + XGMAC_VLAN_HASH_TABLE); - - if (hash) { - u32 value = readl(ioaddr + XGMAC_PACKET_FILTER); - - value |= XGMAC_FILTER_VTFE; - - writel(value, ioaddr + XGMAC_PACKET_FILTER); - - value = readl(ioaddr + XGMAC_VLAN_TAG); - - value |= XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV; - if (is_double) { - value |= XGMAC_VLAN_EDVLP; - value |= XGMAC_VLAN_ESVL; - value |= XGMAC_VLAN_DOVLTC; - } else { - value &= ~XGMAC_VLAN_EDVLP; - value &= ~XGMAC_VLAN_ESVL; - value &= ~XGMAC_VLAN_DOVLTC; - } - - value &= ~XGMAC_VLAN_VID; - writel(value, ioaddr + XGMAC_VLAN_TAG); - } else if (perfect_match) { - u32 value = readl(ioaddr + XGMAC_PACKET_FILTER); - - value |= XGMAC_FILTER_VTFE; - - writel(value, ioaddr + XGMAC_PACKET_FILTER); - - value = readl(ioaddr + XGMAC_VLAN_TAG); - - value &= ~XGMAC_VLAN_VTHM; - value |= XGMAC_VLAN_ETV; - if (is_double) { - value |= XGMAC_VLAN_EDVLP; - value |= XGMAC_VLAN_ESVL; - value |= XGMAC_VLAN_DOVLTC; - } else { - value &= ~XGMAC_VLAN_EDVLP; - value &= ~XGMAC_VLAN_ESVL; - value &= ~XGMAC_VLAN_DOVLTC; - } - - value &= ~XGMAC_VLAN_VID; - writel(value | perfect_match, ioaddr + XGMAC_VLAN_TAG); - } else { - u32 value = readl(ioaddr + XGMAC_PACKET_FILTER); - - value &= ~XGMAC_FILTER_VTFE; - - writel(value, ioaddr + XGMAC_PACKET_FILTER); - - value = readl(ioaddr + XGMAC_VLAN_TAG); - - value &= ~(XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV); - value &= ~(XGMAC_VLAN_EDVLP | XGMAC_VLAN_ESVL); - value &= ~XGMAC_VLAN_DOVLTC; - value &= ~XGMAC_VLAN_VID; - - writel(value, ioaddr + XGMAC_VLAN_TAG); - } -} - struct dwxgmac3_error_desc { bool valid; const char *desc; @@ -1300,19 +1231,6 @@ static void dwxgmac2_sarc_configure(void __iomem *ioaddr, int val) writel(value, ioaddr + XGMAC_TX_CONFIG); } -static void dwxgmac2_enable_vlan(struct mac_device_info *hw, u32 type) -{ - void __iomem *ioaddr = hw->pcsr; - u32 value; - - value = readl(ioaddr + XGMAC_VLAN_INCL); - value |= XGMAC_VLAN_VLTI; - value |= XGMAC_VLAN_CSVL; /* Only use SVLAN */ - value &= ~XGMAC_VLAN_VLC; - value |= (type << XGMAC_VLAN_VLC_SHIFT) & XGMAC_VLAN_VLC; - writel(value, ioaddr + XGMAC_VLAN_INCL); -} - static int dwxgmac2_filter_wait(struct mac_device_info *hw) { void __iomem *ioaddr = hw->pcsr; @@ -1534,12 +1452,10 @@ const struct stmmac_ops dwxgmac210_ops = { .safety_feat_dump = dwxgmac3_safety_feat_dump, .set_mac_loopback = dwxgmac2_set_mac_loopback, .rss_configure = dwxgmac2_rss_configure, - .update_vlan_hash = dwxgmac2_update_vlan_hash, .rxp_config = dwxgmac3_rxp_config, .get_mac_tx_timestamp = dwxgmac2_get_mac_tx_timestamp, .flex_pps_config = dwxgmac2_flex_pps_config, .sarc_configure = dwxgmac2_sarc_configure, - .enable_vlan = dwxgmac2_enable_vlan, .config_l3_filter = dwxgmac2_config_l3_filter, .config_l4_filter = dwxgmac2_config_l4_filter, .set_arp_offload = dwxgmac2_set_arp_offload, @@ -1590,12 +1506,10 @@ const struct stmmac_ops dwxlgmac2_ops = { .safety_feat_dump = dwxgmac3_safety_feat_dump, .set_mac_loopback = dwxgmac2_set_mac_loopback, .rss_configure = dwxgmac2_rss_configure, - .update_vlan_hash = dwxgmac2_update_vlan_hash, .rxp_config = dwxgmac3_rxp_config, .get_mac_tx_timestamp = dwxgmac2_get_mac_tx_timestamp, .flex_pps_config = dwxgmac2_flex_pps_config, 
.sarc_configure = dwxgmac2_sarc_configure, - .enable_vlan = dwxgmac2_enable_vlan, .config_l3_filter = dwxgmac2_config_l3_filter, .config_l4_filter = dwxgmac2_config_l4_filter, .set_arp_offload = dwxgmac2_set_arp_offload, @@ -1638,6 +1552,7 @@ int dwxgmac2_setup(struct stmmac_priv *priv) mac->mii.reg_mask = GENMASK(15, 0); mac->mii.clk_csr_shift = 19; mac->mii.clk_csr_mask = GENMASK(21, 19); + mac->num_vlan = stmmac_get_num_vlan(priv->ioaddr); return 0; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c index 389aad7b5c1e..a2980482fcce 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c @@ -4,6 +4,7 @@ * stmmac XGMAC support. */ +#include <linux/bitfield.h> #include <linux/stmmac.h> #include "common.h" #include "dwxgmac2.h" @@ -69,6 +70,21 @@ static int dwxgmac2_get_tx_ls(struct dma_desc *p) return (le32_to_cpu(p->des3) & XGMAC_RDES3_LD) > 0; } +static u16 dwxgmac2_wrback_get_rx_vlan_tci(struct dma_desc *p) +{ + return le32_to_cpu(p->des0) & XGMAC_RDES0_VLAN_TAG_MASK; +} + +static bool dwxgmac2_wrback_get_rx_vlan_valid(struct dma_desc *p) +{ + u32 et_lt; + + et_lt = FIELD_GET(XGMAC_RDES3_ET_LT, le32_to_cpu(p->des3)); + + return et_lt >= XGMAC_ET_LT_VLAN_STAG && + et_lt <= XGMAC_ET_LT_DVLAN_STAG_CTAG; +} + static int dwxgmac2_get_rx_frame_len(struct dma_desc *p, int rx_coe) { return (le32_to_cpu(p->des3) & XGMAC_RDES3_PL); @@ -351,6 +367,8 @@ const struct stmmac_desc_ops dwxgmac210_desc_ops = { .set_tx_owner = dwxgmac2_set_tx_owner, .set_rx_owner = dwxgmac2_set_rx_owner, .get_tx_ls = dwxgmac2_get_tx_ls, + .get_rx_vlan_tci = dwxgmac2_wrback_get_rx_vlan_tci, + .get_rx_vlan_valid = dwxgmac2_wrback_get_rx_vlan_valid, .get_rx_frame_len = dwxgmac2_get_rx_frame_len, .enable_tx_timestamp = dwxgmac2_enable_tx_timestamp, .get_tx_timestamp_status = dwxgmac2_get_tx_timestamp_status, diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c index 31bdbab9a46c..99635b37044a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.c +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c @@ -9,6 +9,7 @@ #include "stmmac_fpe.h" #include "stmmac_ptp.h" #include "stmmac_est.h" +#include "stmmac_vlan.h" #include "dwmac4_descs.h" #include "dwxgmac2.h" @@ -120,6 +121,7 @@ static const struct stmmac_hwif_entry { const void *tc; const void *mmc; const void *est; + const void *vlan; int (*setup)(struct stmmac_priv *priv); int (*quirks)(struct stmmac_priv *priv); } stmmac_hw[] = { @@ -175,6 +177,7 @@ static const struct stmmac_hwif_entry { .desc = &dwmac4_desc_ops, .dma = &dwmac4_dma_ops, .mac = &dwmac4_ops, + .vlan = &dwmac_vlan_ops, .hwtimestamp = &stmmac_ptp, .ptp = &stmmac_ptp_clock_ops, .mode = NULL, @@ -197,6 +200,7 @@ static const struct stmmac_hwif_entry { .desc = &dwmac4_desc_ops, .dma = &dwmac4_dma_ops, .mac = &dwmac410_ops, + .vlan = &dwmac_vlan_ops, .hwtimestamp = &stmmac_ptp, .ptp = &stmmac_ptp_clock_ops, .mode = &dwmac4_ring_mode_ops, @@ -219,6 +223,7 @@ static const struct stmmac_hwif_entry { .desc = &dwmac4_desc_ops, .dma = &dwmac410_dma_ops, .mac = &dwmac410_ops, + .vlan = &dwmac_vlan_ops, .hwtimestamp = &stmmac_ptp, .ptp = &stmmac_ptp_clock_ops, .mode = &dwmac4_ring_mode_ops, @@ -241,6 +246,7 @@ static const struct stmmac_hwif_entry { .desc = &dwmac4_desc_ops, .dma = &dwmac410_dma_ops, .mac = &dwmac510_ops, + .vlan = &dwmac_vlan_ops, .hwtimestamp = &stmmac_ptp, .ptp = &stmmac_ptp_clock_ops, .mode = &dwmac4_ring_mode_ops, @@ 
-264,6 +270,7 @@ static const struct stmmac_hwif_entry { .desc = &dwxgmac210_desc_ops, .dma = &dwxgmac210_dma_ops, .mac = &dwxgmac210_ops, + .vlan = &dwxgmac210_vlan_ops, .hwtimestamp = &stmmac_ptp, .ptp = &stmmac_ptp_clock_ops, .mode = NULL, @@ -287,6 +294,7 @@ static const struct stmmac_hwif_entry { .desc = &dwxgmac210_desc_ops, .dma = &dwxgmac210_dma_ops, .mac = &dwxlgmac2_ops, + .vlan = &dwxlgmac2_vlan_ops, .hwtimestamp = &stmmac_ptp, .ptp = &stmmac_ptp_clock_ops, .mode = NULL, @@ -368,6 +376,7 @@ int stmmac_hwif_init(struct stmmac_priv *priv) mac->tc = mac->tc ? : entry->tc; mac->mmc = mac->mmc ? : entry->mmc; mac->est = mac->est ? : entry->est; + mac->vlan = mac->vlan ? : entry->vlan; priv->hw = mac; priv->fpe_cfg.reg = entry->regs.fpe_reg; diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index 27c63a9fc163..ae4efffb785f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -398,21 +398,6 @@ struct stmmac_ops { /* RSS */ int (*rss_configure)(struct mac_device_info *hw, struct stmmac_rss *cfg, u32 num_rxq); - /* VLAN */ - void (*update_vlan_hash)(struct mac_device_info *hw, u32 hash, - u16 perfect_match, bool is_double); - void (*enable_vlan)(struct mac_device_info *hw, u32 type); - void (*rx_hw_vlan)(struct mac_device_info *hw, struct dma_desc *rx_desc, - struct sk_buff *skb); - void (*set_hw_vlan_mode)(struct mac_device_info *hw); - int (*add_hw_vlan_rx_fltr)(struct net_device *dev, - struct mac_device_info *hw, - __be16 proto, u16 vid); - int (*del_hw_vlan_rx_fltr)(struct net_device *dev, - struct mac_device_info *hw, - __be16 proto, u16 vid); - void (*restore_hw_vlan_rx_fltr)(struct net_device *dev, - struct mac_device_info *hw); /* TX Timestamp */ int (*get_mac_tx_timestamp)(struct mac_device_info *hw, u64 *ts); /* Source Address Insertion / Replacement */ @@ -498,20 +483,6 @@ struct stmmac_ops { stmmac_do_void_callback(__priv, mac, set_mac_loopback, __args) #define stmmac_rss_configure(__priv, __args...) \ stmmac_do_callback(__priv, mac, rss_configure, __args) -#define stmmac_update_vlan_hash(__priv, __args...) \ - stmmac_do_void_callback(__priv, mac, update_vlan_hash, __args) -#define stmmac_enable_vlan(__priv, __args...) \ - stmmac_do_void_callback(__priv, mac, enable_vlan, __args) -#define stmmac_rx_hw_vlan(__priv, __args...) \ - stmmac_do_void_callback(__priv, mac, rx_hw_vlan, __args) -#define stmmac_set_hw_vlan_mode(__priv, __args...) \ - stmmac_do_void_callback(__priv, mac, set_hw_vlan_mode, __args) -#define stmmac_add_hw_vlan_rx_fltr(__priv, __args...) \ - stmmac_do_callback(__priv, mac, add_hw_vlan_rx_fltr, __args) -#define stmmac_del_hw_vlan_rx_fltr(__priv, __args...) \ - stmmac_do_callback(__priv, mac, del_hw_vlan_rx_fltr, __args) -#define stmmac_restore_hw_vlan_rx_fltr(__priv, __args...) \ - stmmac_do_void_callback(__priv, mac, restore_hw_vlan_rx_fltr, __args) #define stmmac_get_mac_tx_timestamp(__priv, __args...) \ stmmac_do_callback(__priv, mac, get_mac_tx_timestamp, __args) #define stmmac_sarc_configure(__priv, __args...) \ @@ -659,6 +630,39 @@ struct stmmac_est_ops { #define stmmac_est_irq_status(__priv, __args...) 
\ stmmac_do_void_callback(__priv, est, irq_status, __args) +struct stmmac_vlan_ops { + /* VLAN */ + void (*update_vlan_hash)(struct mac_device_info *hw, u32 hash, + u16 perfect_match, bool is_double); + void (*enable_vlan)(struct mac_device_info *hw, u32 type); + void (*rx_hw_vlan)(struct mac_device_info *hw, struct dma_desc *rx_desc, + struct sk_buff *skb); + void (*set_hw_vlan_mode)(struct mac_device_info *hw); + int (*add_hw_vlan_rx_fltr)(struct net_device *dev, + struct mac_device_info *hw, + __be16 proto, u16 vid); + int (*del_hw_vlan_rx_fltr)(struct net_device *dev, + struct mac_device_info *hw, + __be16 proto, u16 vid); + void (*restore_hw_vlan_rx_fltr)(struct net_device *dev, + struct mac_device_info *hw); +}; + +#define stmmac_update_vlan_hash(__priv, __args...) \ + stmmac_do_void_callback(__priv, vlan, update_vlan_hash, __args) +#define stmmac_enable_vlan(__priv, __args...) \ + stmmac_do_void_callback(__priv, vlan, enable_vlan, __args) +#define stmmac_rx_hw_vlan(__priv, __args...) \ + stmmac_do_void_callback(__priv, vlan, rx_hw_vlan, __args) +#define stmmac_set_hw_vlan_mode(__priv, __args...) \ + stmmac_do_void_callback(__priv, vlan, set_hw_vlan_mode, __args) +#define stmmac_add_hw_vlan_rx_fltr(__priv, __args...) \ + stmmac_do_callback(__priv, vlan, add_hw_vlan_rx_fltr, __args) +#define stmmac_del_hw_vlan_rx_fltr(__priv, __args...) \ + stmmac_do_callback(__priv, vlan, del_hw_vlan_rx_fltr, __args) +#define stmmac_restore_hw_vlan_rx_fltr(__priv, __args...) \ + stmmac_do_void_callback(__priv, vlan, restore_hw_vlan_rx_fltr, __args) + struct stmmac_regs_off { const struct stmmac_fpe_reg *fpe_reg; u32 ptp_off; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index bddfa0f4aa21..cda09cf5dcca 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -149,21 +149,9 @@ struct stmmac_channel { }; struct stmmac_fpe_cfg { - /* Serialize access to MAC Merge state between ethtool requests - * and link state updates. 
- */ - spinlock_t lock; - + struct ethtool_mmsv mmsv; const struct stmmac_fpe_reg *reg; - u32 fpe_csr; /* MAC_FPE_CTRL_STS reg cache */ - - enum ethtool_mm_verify_status status; - struct timer_list verify_timer; - bool verify_enabled; - int verify_retries; - bool pmac_enabled; - u32 verify_time; - bool tx_enabled; + u32 fpe_csr; /* MAC_FPE_CTRL_STS reg cache */ }; struct stmmac_tc_entry { @@ -313,7 +301,7 @@ struct stmmac_priv { unsigned int mode; unsigned int chain_mode; int extend_desc; - struct hwtstamp_config tstamp_config; + struct kernel_hwtstamp_config tstamp_config; struct ptp_clock *ptp_clock; struct ptp_clock_info ptp_clock_ops; unsigned int default_addend; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 918a32f8fda8..f702f7b7bf9f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -37,7 +37,7 @@ #define ETHTOOL_DMA_OFFSET 55 struct stmmac_stats { - char stat_string[ETH_GSTRING_LEN]; + char stat_string[ETH_GSTRING_LEN] __nonstring; int sizeof_stat; int stat_offset; }; @@ -1210,36 +1210,16 @@ static int stmmac_get_mm(struct net_device *ndev, struct ethtool_mm_state *state) { struct stmmac_priv *priv = netdev_priv(ndev); - unsigned long flags; u32 frag_size; if (!stmmac_fpe_supported(priv)) return -EOPNOTSUPP; - spin_lock_irqsave(&priv->fpe_cfg.lock, flags); - - state->max_verify_time = STMMAC_FPE_MM_MAX_VERIFY_TIME_MS; - state->verify_enabled = priv->fpe_cfg.verify_enabled; - state->pmac_enabled = priv->fpe_cfg.pmac_enabled; - state->verify_time = priv->fpe_cfg.verify_time; - state->tx_enabled = priv->fpe_cfg.tx_enabled; - state->verify_status = priv->fpe_cfg.status; state->rx_min_frag_size = ETH_ZLEN; - - /* FPE active if common tx_enabled and - * (verification success or disabled(forced)) - */ - if (state->tx_enabled && - (state->verify_status == ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED || - state->verify_status == ETHTOOL_MM_VERIFY_STATUS_DISABLED)) - state->tx_active = true; - else - state->tx_active = false; - frag_size = stmmac_fpe_get_add_frag_size(priv); state->tx_min_frag_size = ethtool_mm_frag_size_add_to_min(frag_size); - spin_unlock_irqrestore(&priv->fpe_cfg.lock, flags); + ethtool_mmsv_get_mm(&priv->fpe_cfg.mmsv, state); return 0; } @@ -1248,8 +1228,6 @@ static int stmmac_set_mm(struct net_device *ndev, struct ethtool_mm_cfg *cfg, struct netlink_ext_ack *extack) { struct stmmac_priv *priv = netdev_priv(ndev); - struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg; - unsigned long flags; u32 frag_size; int err; @@ -1258,23 +1236,8 @@ static int stmmac_set_mm(struct net_device *ndev, struct ethtool_mm_cfg *cfg, if (err) return err; - /* Wait for the verification that's currently in progress to finish */ - timer_shutdown_sync(&fpe_cfg->verify_timer); - - spin_lock_irqsave(&fpe_cfg->lock, flags); - - fpe_cfg->verify_enabled = cfg->verify_enabled; - fpe_cfg->pmac_enabled = cfg->pmac_enabled; - fpe_cfg->verify_time = cfg->verify_time; - fpe_cfg->tx_enabled = cfg->tx_enabled; - - if (!cfg->verify_enabled) - fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_DISABLED; - stmmac_fpe_set_add_frag_size(priv, frag_size); - stmmac_fpe_apply(priv); - - spin_unlock_irqrestore(&fpe_cfg->lock, flags); + ethtool_mmsv_set_mm(&priv->fpe_cfg.mmsv, cfg); return 0; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c index 3a4bee029c7f..75b470ee621a 100644 --- 
a/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c @@ -27,12 +27,6 @@ #define STMMAC_MAC_FPE_CTRL_STS_SVER BIT(1) #define STMMAC_MAC_FPE_CTRL_STS_EFPE BIT(0) -/* FPE link-partner hand-shaking mPacket type */ -enum stmmac_mpacket_type { - MPACKET_VERIFY = 0, - MPACKET_RESPONSE = 1, -}; - struct stmmac_fpe_reg { const u32 mac_fpe_reg; /* offset of MAC_FPE_CTRL_STS */ const u32 mtl_fpe_reg; /* offset of MTL_FPE_CTRL_STS */ @@ -48,10 +42,10 @@ bool stmmac_fpe_supported(struct stmmac_priv *priv) priv->hw->mac->fpe_map_preemption_class; } -static void stmmac_fpe_configure(struct stmmac_priv *priv, bool tx_enable, - bool pmac_enable) +static void stmmac_fpe_configure_tx(struct ethtool_mmsv *mmsv, bool tx_enable) { - struct stmmac_fpe_cfg *cfg = &priv->fpe_cfg; + struct stmmac_fpe_cfg *cfg = container_of(mmsv, struct stmmac_fpe_cfg, mmsv); + struct stmmac_priv *priv = container_of(cfg, struct stmmac_priv, fpe_cfg); const struct stmmac_fpe_reg *reg = cfg->reg; u32 num_rxq = priv->plat->rx_queues_to_use; void __iomem *ioaddr = priv->ioaddr; @@ -68,6 +62,15 @@ static void stmmac_fpe_configure(struct stmmac_priv *priv, bool tx_enable, cfg->fpe_csr = 0; } writel(cfg->fpe_csr, ioaddr + reg->mac_fpe_reg); +} + +static void stmmac_fpe_configure_pmac(struct ethtool_mmsv *mmsv, bool pmac_enable) +{ + struct stmmac_fpe_cfg *cfg = container_of(mmsv, struct stmmac_fpe_cfg, mmsv); + struct stmmac_priv *priv = container_of(cfg, struct stmmac_priv, fpe_cfg); + const struct stmmac_fpe_reg *reg = cfg->reg; + void __iomem *ioaddr = priv->ioaddr; + u32 value; value = readl(ioaddr + reg->int_en_reg); @@ -85,47 +88,45 @@ static void stmmac_fpe_configure(struct stmmac_priv *priv, bool tx_enable, writel(value, ioaddr + reg->int_en_reg); } -static void stmmac_fpe_send_mpacket(struct stmmac_priv *priv, - enum stmmac_mpacket_type type) +static void stmmac_fpe_send_mpacket(struct ethtool_mmsv *mmsv, + enum ethtool_mpacket type) { - const struct stmmac_fpe_reg *reg = priv->fpe_cfg.reg; + struct stmmac_fpe_cfg *cfg = container_of(mmsv, struct stmmac_fpe_cfg, mmsv); + struct stmmac_priv *priv = container_of(cfg, struct stmmac_priv, fpe_cfg); + const struct stmmac_fpe_reg *reg = cfg->reg; void __iomem *ioaddr = priv->ioaddr; - u32 value = priv->fpe_cfg.fpe_csr; + u32 value = cfg->fpe_csr; - if (type == MPACKET_VERIFY) + if (type == ETHTOOL_MPACKET_VERIFY) value |= STMMAC_MAC_FPE_CTRL_STS_SVER; - else if (type == MPACKET_RESPONSE) + else if (type == ETHTOOL_MPACKET_RESPONSE) value |= STMMAC_MAC_FPE_CTRL_STS_SRSP; writel(value, ioaddr + reg->mac_fpe_reg); } +static const struct ethtool_mmsv_ops stmmac_mmsv_ops = { + .configure_tx = stmmac_fpe_configure_tx, + .configure_pmac = stmmac_fpe_configure_pmac, + .send_mpacket = stmmac_fpe_send_mpacket, +}; + static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status) { struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg; + struct ethtool_mmsv *mmsv = &fpe_cfg->mmsv; - /* This is interrupt context, just spin_lock() */ - spin_lock(&fpe_cfg->lock); - - if (!fpe_cfg->pmac_enabled || status == FPE_EVENT_UNKNOWN) - goto unlock_out; + if (status == FPE_EVENT_UNKNOWN) + return; - /* LP has sent verify mPacket */ if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) - stmmac_fpe_send_mpacket(priv, MPACKET_RESPONSE); + ethtool_mmsv_event_handle(mmsv, ETHTOOL_MMSV_LP_SENT_VERIFY_MPACKET); - /* Local has sent verify mPacket */ - if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER && - fpe_cfg->status != ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED) - 
fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_VERIFYING; + if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) + ethtool_mmsv_event_handle(mmsv, ETHTOOL_MMSV_LD_SENT_VERIFY_MPACKET); - /* LP has sent response mPacket */ - if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP && - fpe_cfg->status == ETHTOOL_MM_VERIFY_STATUS_VERIFYING) - fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED; - -unlock_out: - spin_unlock(&fpe_cfg->lock); + if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP) + ethtool_mmsv_event_handle(mmsv, ETHTOOL_MMSV_LP_SENT_RESPONSE_MPACKET); } void stmmac_fpe_irq_status(struct stmmac_priv *priv) @@ -164,119 +165,16 @@ void stmmac_fpe_irq_status(struct stmmac_priv *priv) stmmac_fpe_event_status(priv, status); } -/** - * stmmac_fpe_verify_timer - Timer for MAC Merge verification - * @t: timer_list struct containing private info - * - * Verify the MAC Merge capability in the local TX direction, by - * transmitting Verify mPackets up to 3 times. Wait until link - * partner responds with a Response mPacket, otherwise fail. - */ -static void stmmac_fpe_verify_timer(struct timer_list *t) -{ - struct stmmac_fpe_cfg *fpe_cfg = from_timer(fpe_cfg, t, verify_timer); - struct stmmac_priv *priv = container_of(fpe_cfg, struct stmmac_priv, - fpe_cfg); - unsigned long flags; - bool rearm = false; - - spin_lock_irqsave(&fpe_cfg->lock, flags); - - switch (fpe_cfg->status) { - case ETHTOOL_MM_VERIFY_STATUS_INITIAL: - case ETHTOOL_MM_VERIFY_STATUS_VERIFYING: - if (fpe_cfg->verify_retries != 0) { - stmmac_fpe_send_mpacket(priv, MPACKET_VERIFY); - rearm = true; - } else { - fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_FAILED; - } - - fpe_cfg->verify_retries--; - break; - - case ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED: - stmmac_fpe_configure(priv, true, true); - break; - - default: - break; - } - - if (rearm) { - mod_timer(&fpe_cfg->verify_timer, - jiffies + msecs_to_jiffies(fpe_cfg->verify_time)); - } - - spin_unlock_irqrestore(&fpe_cfg->lock, flags); -} - -static void stmmac_fpe_verify_timer_arm(struct stmmac_fpe_cfg *fpe_cfg) -{ - if (fpe_cfg->pmac_enabled && fpe_cfg->tx_enabled && - fpe_cfg->verify_enabled && - fpe_cfg->status != ETHTOOL_MM_VERIFY_STATUS_FAILED && - fpe_cfg->status != ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED) { - timer_setup(&fpe_cfg->verify_timer, stmmac_fpe_verify_timer, 0); - mod_timer(&fpe_cfg->verify_timer, jiffies); - } -} - void stmmac_fpe_init(struct stmmac_priv *priv) { - priv->fpe_cfg.verify_retries = STMMAC_FPE_MM_MAX_VERIFY_RETRIES; - priv->fpe_cfg.verify_time = STMMAC_FPE_MM_MAX_VERIFY_TIME_MS; - priv->fpe_cfg.status = ETHTOOL_MM_VERIFY_STATUS_DISABLED; - timer_setup(&priv->fpe_cfg.verify_timer, stmmac_fpe_verify_timer, 0); - spin_lock_init(&priv->fpe_cfg.lock); + ethtool_mmsv_init(&priv->fpe_cfg.mmsv, priv->dev, + &stmmac_mmsv_ops); if ((!priv->fpe_cfg.reg || !priv->hw->mac->fpe_map_preemption_class) && priv->dma_cap.fpesel) dev_info(priv->device, "FPE is not supported by driver.\n"); } -void stmmac_fpe_apply(struct stmmac_priv *priv) -{ - struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg; - - /* If verification is disabled, configure FPE right away. - * Otherwise let the timer code do it. 
- */ - if (!fpe_cfg->verify_enabled) { - stmmac_fpe_configure(priv, fpe_cfg->tx_enabled, - fpe_cfg->pmac_enabled); - } else { - fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_INITIAL; - fpe_cfg->verify_retries = STMMAC_FPE_MM_MAX_VERIFY_RETRIES; - - if (netif_running(priv->dev)) - stmmac_fpe_verify_timer_arm(fpe_cfg); - } -} - -void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up) -{ - struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg; - unsigned long flags; - - timer_shutdown_sync(&fpe_cfg->verify_timer); - - spin_lock_irqsave(&fpe_cfg->lock, flags); - - if (is_up && fpe_cfg->pmac_enabled) { - /* VERIFY process requires pmac enabled when NIC comes up */ - stmmac_fpe_configure(priv, false, true); - - /* New link => maybe new partner => new verification process */ - stmmac_fpe_apply(priv); - } else { - /* No link => turn off EFPE */ - stmmac_fpe_configure(priv, false, false); - } - - spin_unlock_irqrestore(&fpe_cfg->lock, flags); -} - int stmmac_fpe_get_add_frag_size(struct stmmac_priv *priv) { const struct stmmac_fpe_reg *reg = priv->fpe_cfg.reg; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.h index b884eac7142d..3fc46acf7001 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.h @@ -9,15 +9,10 @@ #include <linux/types.h> #include <linux/netdevice.h> -#define STMMAC_FPE_MM_MAX_VERIFY_RETRIES 3 -#define STMMAC_FPE_MM_MAX_VERIFY_TIME_MS 128 - struct stmmac_priv; -void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up); bool stmmac_fpe_supported(struct stmmac_priv *priv); void stmmac_fpe_init(struct stmmac_priv *priv); -void stmmac_fpe_apply(struct stmmac_priv *priv); void stmmac_fpe_irq_status(struct stmmac_priv *priv); int stmmac_fpe_get_add_frag_size(struct stmmac_priv *priv); void stmmac_fpe_set_add_frag_size(struct stmmac_priv *priv, u32 add_frag_size); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 59d07d0d3369..085c09039af4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -568,18 +568,19 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p, /** * stmmac_hwtstamp_set - control hardware timestamping. * @dev: device pointer. - * @ifr: An IOCTL specific structure, that can contain a pointer to - * a proprietary structure used to pass information to the driver. + * @config: the timestamping configuration. + * @extack: netlink extended ack structure for error reporting. * Description: * This function configures the MAC to enable/disable both outgoing(TX) * and incoming(RX) packets time stamping based on user input. * Return Value: * 0 on success and an appropriate -ve integer on failure. 
*/ -static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) +static int stmmac_hwtstamp_set(struct net_device *dev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) { struct stmmac_priv *priv = netdev_priv(dev); - struct hwtstamp_config config; u32 ptp_v2 = 0; u32 tstamp_all = 0; u32 ptp_over_ipv4_udp = 0; @@ -590,34 +591,36 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) u32 ts_event_en = 0; if (!(priv->dma_cap.time_stamp || priv->adv_ts)) { - netdev_alert(priv->dev, "No support for HW time stamping\n"); + NL_SET_ERR_MSG_MOD(extack, "No support for HW time stamping"); priv->hwts_tx_en = 0; priv->hwts_rx_en = 0; return -EOPNOTSUPP; } - if (copy_from_user(&config, ifr->ifr_data, - sizeof(config))) - return -EFAULT; + if (!netif_running(dev)) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot change timestamping configuration while down"); + return -ENODEV; + } netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n", - __func__, config.flags, config.tx_type, config.rx_filter); + __func__, config->flags, config->tx_type, config->rx_filter); - if (config.tx_type != HWTSTAMP_TX_OFF && - config.tx_type != HWTSTAMP_TX_ON) + if (config->tx_type != HWTSTAMP_TX_OFF && + config->tx_type != HWTSTAMP_TX_ON) return -ERANGE; if (priv->adv_ts) { - switch (config.rx_filter) { + switch (config->rx_filter) { case HWTSTAMP_FILTER_NONE: /* time stamp no incoming packet at all */ - config.rx_filter = HWTSTAMP_FILTER_NONE; + config->rx_filter = HWTSTAMP_FILTER_NONE; break; case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: /* PTP v1, UDP, any kind of event packet */ - config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; + config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; /* 'xmac' hardware can support Sync, Pdelay_Req and * Pdelay_resp by setting bit14 and bits17/16 to 01 * This leaves Delay_Req timestamps out. 
@@ -631,7 +634,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: /* PTP v1, UDP, Sync packet */ - config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC; + config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC; /* take time stamp for SYNC messages only */ ts_event_en = PTP_TCR_TSEVNTENA; @@ -641,7 +644,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: /* PTP v1, UDP, Delay_req packet */ - config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ; + config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ; /* take time stamp for Delay_Req messages only */ ts_master_en = PTP_TCR_TSMSTRENA; ts_event_en = PTP_TCR_TSEVNTENA; @@ -652,7 +655,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: /* PTP v2, UDP, any kind of event packet */ - config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; + config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; ptp_v2 = PTP_TCR_TSVER2ENA; /* take time stamp for all event messages */ snap_type_sel = PTP_TCR_SNAPTYPSEL_1; @@ -663,7 +666,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: /* PTP v2, UDP, Sync packet */ - config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC; + config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC; ptp_v2 = PTP_TCR_TSVER2ENA; /* take time stamp for SYNC messages only */ ts_event_en = PTP_TCR_TSEVNTENA; @@ -674,7 +677,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: /* PTP v2, UDP, Delay_req packet */ - config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ; + config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ; ptp_v2 = PTP_TCR_TSVER2ENA; /* take time stamp for Delay_Req messages only */ ts_master_en = PTP_TCR_TSMSTRENA; @@ -686,7 +689,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) case HWTSTAMP_FILTER_PTP_V2_EVENT: /* PTP v2/802.AS1 any layer, any kind of event packet */ - config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; ptp_v2 = PTP_TCR_TSVER2ENA; snap_type_sel = PTP_TCR_SNAPTYPSEL_1; if (priv->synopsys_id < DWMAC_CORE_4_10) @@ -698,7 +701,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) case HWTSTAMP_FILTER_PTP_V2_SYNC: /* PTP v2/802.AS1, any layer, Sync packet */ - config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC; + config->rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC; ptp_v2 = PTP_TCR_TSVER2ENA; /* take time stamp for SYNC messages only */ ts_event_en = PTP_TCR_TSEVNTENA; @@ -710,7 +713,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: /* PTP v2/802.AS1, any layer, Delay_req packet */ - config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ; + config->rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ; ptp_v2 = PTP_TCR_TSVER2ENA; /* take time stamp for Delay_Req messages only */ ts_master_en = PTP_TCR_TSMSTRENA; @@ -724,7 +727,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) case HWTSTAMP_FILTER_NTP_ALL: case HWTSTAMP_FILTER_ALL: /* time stamp any incoming packet */ - config.rx_filter = HWTSTAMP_FILTER_ALL; + config->rx_filter = HWTSTAMP_FILTER_ALL; tstamp_all = PTP_TCR_TSENALL; break; @@ -732,18 +735,18 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) return -ERANGE; } } else { - switch 
(config.rx_filter) { + switch (config->rx_filter) { case HWTSTAMP_FILTER_NONE: - config.rx_filter = HWTSTAMP_FILTER_NONE; + config->rx_filter = HWTSTAMP_FILTER_NONE; break; default: /* PTP v1, UDP, any kind of event packet */ - config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; + config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; break; } } - priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1); - priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON; + priv->hwts_rx_en = config->rx_filter != HWTSTAMP_FILTER_NONE; + priv->hwts_tx_en = config->tx_type == HWTSTAMP_TX_ON; priv->systime_flags = STMMAC_HWTS_ACTIVE; @@ -756,31 +759,30 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags); - memcpy(&priv->tstamp_config, &config, sizeof(config)); + priv->tstamp_config = *config; - return copy_to_user(ifr->ifr_data, &config, - sizeof(config)) ? -EFAULT : 0; + return 0; } /** * stmmac_hwtstamp_get - read hardware timestamping. * @dev: device pointer. - * @ifr: An IOCTL specific structure, that can contain a pointer to - * a proprietary structure used to pass information to the driver. + * @config: the timestamping configuration. * Description: * This function obtain the current hardware timestamping settings * as requested. */ -static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) +static int stmmac_hwtstamp_get(struct net_device *dev, + struct kernel_hwtstamp_config *config) { struct stmmac_priv *priv = netdev_priv(dev); - struct hwtstamp_config *config = &priv->tstamp_config; if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) return -EOPNOTSUPP; - return copy_to_user(ifr->ifr_data, config, - sizeof(*config)) ? -EFAULT : 0; + *config = priv->tstamp_config; + + return 0; } /** @@ -946,7 +948,7 @@ static void stmmac_mac_link_down(struct phylink_config *config, stmmac_set_eee_pls(priv, priv->hw, false); if (stmmac_fpe_supported(priv)) - stmmac_fpe_link_state_handle(priv, false); + ethtool_mmsv_link_state_handle(&priv->fpe_cfg.mmsv, false); } static void stmmac_mac_link_up(struct phylink_config *config, @@ -1064,7 +1066,7 @@ static void stmmac_mac_link_up(struct phylink_config *config, stmmac_set_eee_pls(priv, priv->hw, true); if (stmmac_fpe_supported(priv)) - stmmac_fpe_link_state_handle(priv, true); + ethtool_mmsv_link_state_handle(&priv->fpe_cfg.mmsv, true); if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY) stmmac_hwtstamp_correct_latency(priv, priv); @@ -1258,20 +1260,22 @@ static int stmmac_init_phy(struct net_device *dev) static int stmmac_phy_setup(struct stmmac_priv *priv) { struct stmmac_mdio_bus_data *mdio_bus_data; - int mode = priv->plat->phy_interface; + struct phylink_config *config; struct fwnode_handle *fwnode; struct phylink_pcs *pcs; struct phylink *phylink; - priv->phylink_config.dev = &priv->dev->dev; - priv->phylink_config.type = PHYLINK_NETDEV; - priv->phylink_config.mac_managed_pm = true; + config = &priv->phylink_config; + + config->dev = &priv->dev->dev; + config->type = PHYLINK_NETDEV; + config->mac_managed_pm = true; /* Stmmac always requires an RX clock for hardware initialization */ - priv->phylink_config.mac_requires_rxc = true; + config->mac_requires_rxc = true; if (!(priv->plat->flags & STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) - priv->phylink_config.eee_rx_clk_stop_enable = true; + config->eee_rx_clk_stop_enable = true; /* Set the default transmit clock stop bit based on the platform glue */ priv->tx_lpi_clk_stop = 
priv->plat->flags & @@ -1279,13 +1283,22 @@ static int stmmac_phy_setup(struct stmmac_priv *priv) mdio_bus_data = priv->plat->mdio_bus_data; if (mdio_bus_data) - priv->phylink_config.default_an_inband = - mdio_bus_data->default_an_inband; + config->default_an_inband = mdio_bus_data->default_an_inband; - /* Set the platform/firmware specified interface mode. Note, phylink - * deals with the PHY interface mode, not the MAC interface mode. + /* Get the PHY interface modes (at the PHY end of the link) that + * are supported by the platform. */ - __set_bit(mode, priv->phylink_config.supported_interfaces); + if (priv->plat->get_interfaces) + priv->plat->get_interfaces(priv, priv->plat->bsp_priv, + config->supported_interfaces); + + /* Set the platform/firmware specified interface mode if the + * supported interfaces have not already been provided using + * phy_interface as a last resort. + */ + if (phy_interface_empty(config->supported_interfaces)) + __set_bit(priv->plat->phy_interface, + config->supported_interfaces); /* If we have an xpcs, it defines which PHY interfaces are supported. */ if (priv->hw->xpcs) @@ -1294,29 +1307,27 @@ static int stmmac_phy_setup(struct stmmac_priv *priv) pcs = priv->hw->phylink_pcs; if (pcs) - phy_interface_or(priv->phylink_config.supported_interfaces, - priv->phylink_config.supported_interfaces, + phy_interface_or(config->supported_interfaces, + config->supported_interfaces, pcs->supported_interfaces); if (priv->dma_cap.eee) { /* Assume all supported interfaces also support LPI */ - memcpy(priv->phylink_config.lpi_interfaces, - priv->phylink_config.supported_interfaces, - sizeof(priv->phylink_config.lpi_interfaces)); + memcpy(config->lpi_interfaces, config->supported_interfaces, + sizeof(config->lpi_interfaces)); /* All full duplex speeds above 100Mbps are supported */ - priv->phylink_config.lpi_capabilities = ~(MAC_1000FD - 1) | - MAC_100FD; - priv->phylink_config.lpi_timer_default = eee_timer * 1000; - priv->phylink_config.eee_enabled_default = true; + config->lpi_capabilities = ~(MAC_1000FD - 1) | MAC_100FD; + config->lpi_timer_default = eee_timer * 1000; + config->eee_enabled_default = true; } fwnode = priv->plat->port_node; if (!fwnode) fwnode = dev_fwnode(priv->device); - phylink = phylink_create(&priv->phylink_config, fwnode, - mode, &stmmac_phylink_mac_ops); + phylink = phylink_create(config, fwnode, priv->plat->phy_interface, + &stmmac_phylink_mac_ops); if (IS_ERR(phylink)) return PTR_ERR(phylink); @@ -4152,7 +4163,7 @@ static int stmmac_release(struct net_device *dev) stmmac_release_ptp(priv); if (stmmac_fpe_supported(priv)) - timer_shutdown_sync(&priv->fpe_cfg.verify_timer); + ethtool_mmsv_stop(&priv->fpe_cfg.mmsv); pm_runtime_put(priv->device); @@ -4488,8 +4499,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) if (priv->sarc_type) stmmac_set_desc_sarc(priv, first, priv->sarc_type); - skb_tx_timestamp(skb); - if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)) { /* declare that device is doing timestamping */ @@ -4522,6 +4531,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) } netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); + skb_tx_timestamp(skb); stmmac_flush_tx_descriptors(priv, queue); stmmac_tx_timer_arm(priv, queue); @@ -4765,8 +4775,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) if (priv->sarc_type) stmmac_set_desc_sarc(priv, first, priv->sarc_type); - skb_tx_timestamp(skb); - /* Ready to fill the 
first descriptor and set the OWN bit w/o any * problems because all the descriptors are actually ready to be * passed to the DMA engine. @@ -4813,7 +4821,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); stmmac_enable_dma_transmission(priv, priv->ioaddr, queue); - + skb_tx_timestamp(skb); stmmac_flush_tx_descriptors(priv, queue); stmmac_tx_timer_arm(priv, queue); @@ -6219,12 +6227,6 @@ static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) case SIOCSMIIREG: ret = phylink_mii_ioctl(priv->phylink, rq, cmd); break; - case SIOCSHWTSTAMP: - ret = stmmac_hwtstamp_set(dev, rq); - break; - case SIOCGHWTSTAMP: - ret = stmmac_hwtstamp_get(dev, rq); - break; default: break; } @@ -7163,6 +7165,8 @@ static const struct net_device_ops stmmac_netdev_ops = { .ndo_bpf = stmmac_bpf, .ndo_xdp_xmit = stmmac_xdp_xmit, .ndo_xsk_wakeup = stmmac_xsk_wakeup, + .ndo_hwtstamp_get = stmmac_hwtstamp_get, + .ndo_hwtstamp_set = stmmac_hwtstamp_set, }; static void stmmac_reset_subtask(struct stmmac_priv *priv) @@ -7644,7 +7648,7 @@ int stmmac_dvr_probe(struct device *device, #ifdef STMMAC_VLAN_TAG_USED /* Both mac100 and gmac support receive VLAN tag detection */ ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX; - if (priv->plat->has_gmac4) { + if (priv->plat->has_gmac4 || priv->plat->has_xgmac) { ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; priv->hw->hw_vlan_en = true; } @@ -7727,9 +7731,6 @@ int stmmac_dvr_probe(struct device *device, goto error_mdio_register; } - if (priv->plat->speed_mode_2500) - priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv); - ret = stmmac_pcs_setup(ndev); if (ret) goto error_pcs_setup; @@ -7871,7 +7872,7 @@ int stmmac_suspend(struct device *dev) rtnl_unlock(); if (stmmac_fpe_supported(priv)) - timer_shutdown_sync(&priv->fpe_cfg.verify_timer); + ethtool_mmsv_stop(&priv->fpe_cfg.mmsv); return 0; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index c73eff6a56b8..43c869f64c39 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -709,6 +709,17 @@ devm_stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) #endif /* CONFIG_OF */ EXPORT_SYMBOL_GPL(devm_stmmac_probe_config_dt); +struct clk *stmmac_pltfr_find_clk(struct plat_stmmacenet_data *plat_dat, + const char *name) +{ + for (int i = 0; i < plat_dat->num_clks; i++) + if (strcmp(plat_dat->clks[i].id, name) == 0) + return plat_dat->clks[i].clk; + + return NULL; +} +EXPORT_SYMBOL_GPL(stmmac_pltfr_find_clk); + int stmmac_get_platform_resources(struct platform_device *pdev, struct stmmac_resources *stmmac_res) { diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h index 72dc1a32e46d..6e6561e29d6e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h @@ -14,6 +14,9 @@ struct plat_stmmacenet_data * devm_stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac); +struct clk *stmmac_pltfr_find_clk(struct plat_stmmacenet_data *plat_dat, + const char *name); + int stmmac_get_platform_resources(struct platform_device *pdev, struct stmmac_resources *stmmac_res); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c new file mode 100644 index 
000000000000..0b6f6228ae35 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c @@ -0,0 +1,374 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2025, Altera Corporation + * stmmac VLAN (802.1Q) handling + */ + +#include "stmmac.h" +#include "stmmac_vlan.h" + +static void vlan_write_single(struct net_device *dev, u16 vid) +{ + void __iomem *ioaddr = (void __iomem *)dev->base_addr; + u32 val; + + val = readl(ioaddr + VLAN_TAG); + val &= ~VLAN_TAG_VID; + val |= VLAN_TAG_ETV | vid; + + writel(val, ioaddr + VLAN_TAG); +} + +static int vlan_write_filter(struct net_device *dev, + struct mac_device_info *hw, + u8 index, u32 data) +{ + void __iomem *ioaddr = (void __iomem *)dev->base_addr; + int ret; + u32 val; + + if (index >= hw->num_vlan) + return -EINVAL; + + writel(data, ioaddr + VLAN_TAG_DATA); + + val = readl(ioaddr + VLAN_TAG); + val &= ~(VLAN_TAG_CTRL_OFS_MASK | + VLAN_TAG_CTRL_CT | + VLAN_TAG_CTRL_OB); + val |= (index << VLAN_TAG_CTRL_OFS_SHIFT) | VLAN_TAG_CTRL_OB; + + writel(val, ioaddr + VLAN_TAG); + + ret = readl_poll_timeout(ioaddr + VLAN_TAG, val, + !(val & VLAN_TAG_CTRL_OB), + 1000, 500000); + if (ret) { + netdev_err(dev, "Timeout accessing MAC_VLAN_Tag_Filter\n"); + return -EBUSY; + } + + return 0; +} + +static int vlan_add_hw_rx_fltr(struct net_device *dev, + struct mac_device_info *hw, + __be16 proto, u16 vid) +{ + int index = -1; + u32 val = 0; + int i, ret; + + if (vid > 4095) + return -EINVAL; + + /* Single Rx VLAN Filter */ + if (hw->num_vlan == 1) { + /* For single VLAN filter, VID 0 means VLAN promiscuous */ + if (vid == 0) { + netdev_warn(dev, "Adding VLAN ID 0 is not supported\n"); + return -EPERM; + } + + if (hw->vlan_filter[0] & VLAN_TAG_VID) { + netdev_err(dev, "Only single VLAN ID supported\n"); + return -EPERM; + } + + hw->vlan_filter[0] = vid; + vlan_write_single(dev, vid); + + return 0; + } + + /* Extended Rx VLAN Filter Enable */ + val |= VLAN_TAG_DATA_ETV | VLAN_TAG_DATA_VEN | vid; + + for (i = 0; i < hw->num_vlan; i++) { + if (hw->vlan_filter[i] == val) + return 0; + else if (!(hw->vlan_filter[i] & VLAN_TAG_DATA_VEN)) + index = i; + } + + if (index == -1) { + netdev_err(dev, "MAC_VLAN_Tag_Filter full (size: %0u)\n", + hw->num_vlan); + return -EPERM; + } + + ret = vlan_write_filter(dev, hw, index, val); + + if (!ret) + hw->vlan_filter[index] = val; + + return ret; +} + +static int vlan_del_hw_rx_fltr(struct net_device *dev, + struct mac_device_info *hw, + __be16 proto, u16 vid) +{ + int i, ret = 0; + + /* Single Rx VLAN Filter */ + if (hw->num_vlan == 1) { + if ((hw->vlan_filter[0] & VLAN_TAG_VID) == vid) { + hw->vlan_filter[0] = 0; + vlan_write_single(dev, 0); + } + return 0; + } + + /* Extended Rx VLAN Filter Enable */ + for (i = 0; i < hw->num_vlan; i++) { + if ((hw->vlan_filter[i] & VLAN_TAG_DATA_VID) == vid) { + ret = vlan_write_filter(dev, hw, i, 0); + + if (!ret) + hw->vlan_filter[i] = 0; + else + return ret; + } + } + + return ret; +} + +static void vlan_restore_hw_rx_fltr(struct net_device *dev, + struct mac_device_info *hw) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + u32 hash; + u32 val; + int i; + + /* Single Rx VLAN Filter */ + if (hw->num_vlan == 1) { + vlan_write_single(dev, hw->vlan_filter[0]); + return; + } + + /* Extended Rx VLAN Filter Enable */ + for (i = 0; i < hw->num_vlan; i++) { + if (hw->vlan_filter[i] & VLAN_TAG_DATA_VEN) { + val = hw->vlan_filter[i]; + vlan_write_filter(dev, hw, i, val); + } + } + + hash = readl(ioaddr + VLAN_HASH_TABLE); + if (hash & VLAN_VLHT) { + value = readl(ioaddr + 
VLAN_TAG); + value |= VLAN_VTHM; + writel(value, ioaddr + VLAN_TAG); + } +} + +static void vlan_update_hash(struct mac_device_info *hw, u32 hash, + u16 perfect_match, bool is_double) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + writel(hash, ioaddr + VLAN_HASH_TABLE); + + value = readl(ioaddr + VLAN_TAG); + + if (hash) { + value |= VLAN_VTHM | VLAN_ETV; + if (is_double) { + value |= VLAN_EDVLP; + value |= VLAN_ESVL; + value |= VLAN_DOVLTC; + } + + writel(value, ioaddr + VLAN_TAG); + } else if (perfect_match) { + u32 value = VLAN_ETV; + + if (is_double) { + value |= VLAN_EDVLP; + value |= VLAN_ESVL; + value |= VLAN_DOVLTC; + } + + writel(value | perfect_match, ioaddr + VLAN_TAG); + } else { + value &= ~(VLAN_VTHM | VLAN_ETV); + value &= ~(VLAN_EDVLP | VLAN_ESVL); + value &= ~VLAN_DOVLTC; + value &= ~VLAN_VID; + + writel(value, ioaddr + VLAN_TAG); + } +} + +static void vlan_enable(struct mac_device_info *hw, u32 type) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + value = readl(ioaddr + VLAN_INCL); + value |= VLAN_VLTI; + value |= VLAN_CSVL; /* Only use SVLAN */ + value &= ~VLAN_VLC; + value |= (type << VLAN_VLC_SHIFT) & VLAN_VLC; + writel(value, ioaddr + VLAN_INCL); +} + +static void vlan_rx_hw(struct mac_device_info *hw, + struct dma_desc *rx_desc, struct sk_buff *skb) +{ + if (hw->desc->get_rx_vlan_valid(rx_desc)) { + u16 vid = hw->desc->get_rx_vlan_tci(rx_desc); + + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + } +} + +static void vlan_set_hw_mode(struct mac_device_info *hw) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value = readl(ioaddr + VLAN_TAG); + + value &= ~VLAN_TAG_CTRL_EVLS_MASK; + + if (hw->hw_vlan_en) + /* Always strip VLAN on Receive */ + value |= VLAN_TAG_STRIP_ALL; + else + /* Do not strip VLAN on Receive */ + value |= VLAN_TAG_STRIP_NONE; + + /* Enable outer VLAN Tag in Rx DMA descriptor */ + value |= VLAN_TAG_CTRL_EVLRXS; + writel(value, ioaddr + VLAN_TAG); +} + +static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash, + u16 perfect_match, bool is_double) +{ + void __iomem *ioaddr = hw->pcsr; + + writel(hash, ioaddr + VLAN_HASH_TABLE); + + if (hash) { + u32 value = readl(ioaddr + XGMAC_PACKET_FILTER); + + value |= XGMAC_FILTER_VTFE; + + writel(value, ioaddr + XGMAC_PACKET_FILTER); + + value = readl(ioaddr + VLAN_TAG); + + value |= VLAN_VTHM | VLAN_ETV; + if (is_double) { + value |= VLAN_EDVLP; + value |= VLAN_ESVL; + value |= VLAN_DOVLTC; + } else { + value &= ~VLAN_EDVLP; + value &= ~VLAN_ESVL; + value &= ~VLAN_DOVLTC; + } + + value &= ~VLAN_VID; + writel(value, ioaddr + VLAN_TAG); + } else if (perfect_match) { + u32 value = readl(ioaddr + XGMAC_PACKET_FILTER); + + value |= XGMAC_FILTER_VTFE; + + writel(value, ioaddr + XGMAC_PACKET_FILTER); + + value = readl(ioaddr + VLAN_TAG); + + value &= ~VLAN_VTHM; + value |= VLAN_ETV; + if (is_double) { + value |= VLAN_EDVLP; + value |= VLAN_ESVL; + value |= VLAN_DOVLTC; + } else { + value &= ~VLAN_EDVLP; + value &= ~VLAN_ESVL; + value &= ~VLAN_DOVLTC; + } + + value &= ~VLAN_VID; + writel(value | perfect_match, ioaddr + VLAN_TAG); + } else { + u32 value = readl(ioaddr + XGMAC_PACKET_FILTER); + + value &= ~XGMAC_FILTER_VTFE; + + writel(value, ioaddr + XGMAC_PACKET_FILTER); + + value = readl(ioaddr + VLAN_TAG); + + value &= ~(VLAN_VTHM | VLAN_ETV); + value &= ~(VLAN_EDVLP | VLAN_ESVL); + value &= ~VLAN_DOVLTC; + value &= ~VLAN_VID; + + writel(value, ioaddr + VLAN_TAG); + } +} + +const struct stmmac_vlan_ops dwmac_vlan_ops = { + .update_vlan_hash = vlan_update_hash, + .enable_vlan = 
vlan_enable, + .add_hw_vlan_rx_fltr = vlan_add_hw_rx_fltr, + .del_hw_vlan_rx_fltr = vlan_del_hw_rx_fltr, + .restore_hw_vlan_rx_fltr = vlan_restore_hw_rx_fltr, + .rx_hw_vlan = vlan_rx_hw, + .set_hw_vlan_mode = vlan_set_hw_mode, +}; + +const struct stmmac_vlan_ops dwxlgmac2_vlan_ops = { + .update_vlan_hash = dwxgmac2_update_vlan_hash, + .enable_vlan = vlan_enable, +}; + +const struct stmmac_vlan_ops dwxgmac210_vlan_ops = { + .update_vlan_hash = dwxgmac2_update_vlan_hash, + .enable_vlan = vlan_enable, + .add_hw_vlan_rx_fltr = vlan_add_hw_rx_fltr, + .del_hw_vlan_rx_fltr = vlan_del_hw_rx_fltr, + .restore_hw_vlan_rx_fltr = vlan_restore_hw_rx_fltr, + .rx_hw_vlan = vlan_rx_hw, + .set_hw_vlan_mode = vlan_set_hw_mode, +}; + +u32 stmmac_get_num_vlan(void __iomem *ioaddr) +{ + u32 val, num_vlan; + + val = readl(ioaddr + HW_FEATURE3); + switch (val & VLAN_HW_FEAT_NRVF) { + case 0: + num_vlan = 1; + break; + case 1: + num_vlan = 4; + break; + case 2: + num_vlan = 8; + break; + case 3: + num_vlan = 16; + break; + case 4: + num_vlan = 24; + break; + case 5: + num_vlan = 32; + break; + default: + num_vlan = 1; + } + + return num_vlan; +} diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.h new file mode 100644 index 000000000000..c24f89a9049b --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.h @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2025, Altera Corporation + * stmmac VLAN(802.1Q) handling + */ + +#ifndef __STMMAC_VLAN_H__ +#define __STMMAC_VLAN_H__ + +#include <linux/bitfield.h> +#include "dwxgmac2.h" + +#define VLAN_TAG 0x00000050 +#define VLAN_TAG_DATA 0x00000054 +#define VLAN_HASH_TABLE 0x00000058 +#define VLAN_INCL 0x00000060 + +/* MAC VLAN */ +#define VLAN_EDVLP BIT(26) +#define VLAN_VTHM BIT(25) +#define VLAN_DOVLTC BIT(20) +#define VLAN_ESVL BIT(18) +#define VLAN_ETV BIT(16) +#define VLAN_VID GENMASK(15, 0) +#define VLAN_VLTI BIT(20) +#define VLAN_CSVL BIT(19) +#define VLAN_VLC GENMASK(17, 16) +#define VLAN_VLC_SHIFT 16 +#define VLAN_VLHT GENMASK(15, 0) + +/* MAC VLAN Tag */ +#define VLAN_TAG_VID GENMASK(15, 0) +#define VLAN_TAG_ETV BIT(16) + +/* MAC VLAN Tag Control */ +#define VLAN_TAG_CTRL_OB BIT(0) +#define VLAN_TAG_CTRL_CT BIT(1) +#define VLAN_TAG_CTRL_OFS_MASK GENMASK(6, 2) +#define VLAN_TAG_CTRL_OFS_SHIFT 2 +#define VLAN_TAG_CTRL_EVLS_MASK GENMASK(22, 21) +#define VLAN_TAG_CTRL_EVLS_SHIFT 21 +#define VLAN_TAG_CTRL_EVLRXS BIT(24) + +#define VLAN_TAG_STRIP_NONE FIELD_PREP(VLAN_TAG_CTRL_EVLS_MASK, 0x0) +#define VLAN_TAG_STRIP_PASS FIELD_PREP(VLAN_TAG_CTRL_EVLS_MASK, 0x1) +#define VLAN_TAG_STRIP_FAIL FIELD_PREP(VLAN_TAG_CTRL_EVLS_MASK, 0x2) +#define VLAN_TAG_STRIP_ALL FIELD_PREP(VLAN_TAG_CTRL_EVLS_MASK, 0x3) + +/* MAC VLAN Tag Data/Filter */ +#define VLAN_TAG_DATA_VID GENMASK(15, 0) +#define VLAN_TAG_DATA_VEN BIT(16) +#define VLAN_TAG_DATA_ETV BIT(17) + +/* MAC VLAN HW FEAT */ +#define HW_FEATURE3 0x00000128 +#define VLAN_HW_FEAT_NRVF GENMASK(2, 0) + +extern const struct stmmac_vlan_ops dwmac_vlan_ops; +extern const struct stmmac_vlan_ops dwxgmac210_vlan_ops; +extern const struct stmmac_vlan_ops dwxlgmac2_vlan_ops; + +u32 stmmac_get_num_vlan(void __iomem *ioaddr); + +#endif /* __STMMAC_VLAN_H__ */ diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index 30665ffe78cf..f20d1ff192ef 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -427,7 +427,7 @@ static void 
am65_cpsw_nuss_ndo_host_tx_timeout(struct net_device *ndev, if (netif_tx_queue_stopped(netif_txq)) { /* try recover if stopped by us */ - txq_trans_update(netif_txq); + txq_trans_update(ndev, netif_txq); netif_tx_wake_queue(netif_txq); } } @@ -2679,7 +2679,9 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common) goto of_node_put; ret = of_get_mac_address(port_np, port->slave.mac_addr); - if (ret) { + if (ret == -EPROBE_DEFER) { + goto of_node_put; + } else if (ret) { am65_cpsw_am654_get_efuse_macid(port_np, port->port_id, port->slave.mac_addr); @@ -2760,7 +2762,7 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx) mutex_init(&ndev_priv->mm_lock); port->qos.link_speed = SPEED_UNKNOWN; SET_NETDEV_DEV(port->ndev, dev); - port->ndev->dev.of_node = port->slave.port_np; + device_set_node(&port->ndev->dev, of_fwnode_handle(port->slave.port_np)); eth_hw_addr_set(port->ndev, port->slave.mac_addr); @@ -3561,6 +3563,16 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev) return ret; } + am65_cpsw_nuss_get_ver(common); + + ret = am65_cpsw_nuss_init_host_p(common); + if (ret) + goto err_pm_clear; + + ret = am65_cpsw_nuss_init_slave_ports(common); + if (ret) + goto err_pm_clear; + node = of_get_child_by_name(dev->of_node, "mdio"); if (!node) { dev_warn(dev, "MDIO node not found\n"); @@ -3577,16 +3589,6 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev) } of_node_put(node); - am65_cpsw_nuss_get_ver(common); - - ret = am65_cpsw_nuss_init_host_p(common); - if (ret) - goto err_of_clear; - - ret = am65_cpsw_nuss_init_slave_ports(common); - if (ret) - goto err_of_clear; - /* init common data */ ale_params.dev = dev; ale_params.ale_ageout = AM65_CPSW_ALE_AGEOUT_DEFAULT; diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index a984b7d84e5e..54c24cd3d3be 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -1156,6 +1156,27 @@ static void cpsw_ndo_poll_controller(struct net_device *ndev) } #endif +/* We need a custom implementation of phy_do_ioctl_running() because in switch + * mode, dev->phydev may be different than the phy of the active_slave. We need + * to operate on the locally saved phy instead. 
+ */ +static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd) +{ + struct cpsw_priv *priv = netdev_priv(dev); + struct cpsw_common *cpsw = priv->cpsw; + int slave_no = cpsw_slave_index(cpsw, priv); + struct phy_device *phy; + + if (!netif_running(dev)) + return -EINVAL; + + phy = cpsw->slaves[slave_no].phy; + if (phy) + return phy_mii_ioctl(phy, req, cmd); + + return -EOPNOTSUPP; +} + static const struct net_device_ops cpsw_netdev_ops = { .ndo_open = cpsw_ndo_open, .ndo_stop = cpsw_ndo_stop, @@ -1174,6 +1195,8 @@ static const struct net_device_ops cpsw_netdev_ops = { .ndo_setup_tc = cpsw_ndo_setup_tc, .ndo_bpf = cpsw_ndo_bpf, .ndo_xdp_xmit = cpsw_ndo_xdp_xmit, + .ndo_hwtstamp_get = cpsw_hwtstamp_get, + .ndo_hwtstamp_set = cpsw_hwtstamp_set, }; static void cpsw_get_drvinfo(struct net_device *ndev, @@ -1646,6 +1669,9 @@ static int cpsw_probe(struct platform_device *pdev) ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX; ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | NETDEV_XDP_ACT_NDO_XMIT; + /* Hijack PHY timestamping requests in order to block them */ + if (!cpsw->data.dual_emac) + ndev->see_all_hwtstamp_requests = true; ndev->netdev_ops = &cpsw_netdev_ops; ndev->ethtool_ops = &cpsw_ethtool_ops; diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c index 5b5b52e4e7a7..8b9e2078c602 100644 --- a/drivers/net/ethernet/ti/cpsw_new.c +++ b/drivers/net/ethernet/ti/cpsw_new.c @@ -1132,7 +1132,7 @@ static const struct net_device_ops cpsw_netdev_ops = { .ndo_stop = cpsw_ndo_stop, .ndo_start_xmit = cpsw_ndo_start_xmit, .ndo_set_mac_address = cpsw_ndo_set_mac_address, - .ndo_eth_ioctl = cpsw_ndo_ioctl, + .ndo_eth_ioctl = phy_do_ioctl_running, .ndo_validate_addr = eth_validate_addr, .ndo_tx_timeout = cpsw_ndo_tx_timeout, .ndo_set_rx_mode = cpsw_ndo_set_rx_mode, @@ -1147,6 +1147,8 @@ static const struct net_device_ops cpsw_netdev_ops = { .ndo_bpf = cpsw_ndo_bpf, .ndo_xdp_xmit = cpsw_ndo_xdp_xmit, .ndo_get_port_parent_id = cpsw_get_port_parent_id, + .ndo_hwtstamp_get = cpsw_hwtstamp_get, + .ndo_hwtstamp_set = cpsw_hwtstamp_set, }; static void cpsw_get_drvinfo(struct net_device *ndev, diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c index 6fe4edabba44..bc4fdf17a99e 100644 --- a/drivers/net/ethernet/ti/cpsw_priv.c +++ b/drivers/net/ethernet/ti/cpsw_priv.c @@ -614,24 +614,29 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv) writel_relaxed(ETH_P_8021Q, &cpsw->regs->vlan_ltype); } -static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) +int cpsw_hwtstamp_set(struct net_device *dev, + struct kernel_hwtstamp_config *cfg, + struct netlink_ext_ack *extack) { struct cpsw_priv *priv = netdev_priv(dev); struct cpsw_common *cpsw = priv->cpsw; - struct hwtstamp_config cfg; + + /* This will only execute if dev->see_all_hwtstamp_requests is set */ + if (cfg->source != HWTSTAMP_SOURCE_NETDEV) { + NL_SET_ERR_MSG_MOD(extack, + "Switch mode only supports MAC timestamping"); + return -EOPNOTSUPP; + } if (cpsw->version != CPSW_VERSION_1 && cpsw->version != CPSW_VERSION_2 && cpsw->version != CPSW_VERSION_3) return -EOPNOTSUPP; - if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) - return -EFAULT; - - if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON) + if (cfg->tx_type != HWTSTAMP_TX_OFF && cfg->tx_type != HWTSTAMP_TX_ON) return -ERANGE; - switch (cfg.rx_filter) { + switch (cfg->rx_filter) { case HWTSTAMP_FILTER_NONE: priv->rx_ts_enabled = 0; break; 
@@ -651,13 +656,13 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) case HWTSTAMP_FILTER_PTP_V2_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT; - cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + cfg->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; break; default: return -ERANGE; } - priv->tx_ts_enabled = cfg.tx_type == HWTSTAMP_TX_ON; + priv->tx_ts_enabled = cfg->tx_type == HWTSTAMP_TX_ON; switch (cpsw->version) { case CPSW_VERSION_1: @@ -671,65 +676,40 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) WARN_ON(1); } - return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; + return 0; } -static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) +int cpsw_hwtstamp_get(struct net_device *dev, + struct kernel_hwtstamp_config *cfg) { struct cpsw_common *cpsw = ndev_to_cpsw(dev); struct cpsw_priv *priv = netdev_priv(dev); - struct hwtstamp_config cfg; if (cpsw->version != CPSW_VERSION_1 && cpsw->version != CPSW_VERSION_2 && cpsw->version != CPSW_VERSION_3) return -EOPNOTSUPP; - cfg.flags = 0; - cfg.tx_type = priv->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; - cfg.rx_filter = priv->rx_ts_enabled; + cfg->tx_type = priv->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; + cfg->rx_filter = priv->rx_ts_enabled; - return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; + return 0; } #else -static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) +int cpsw_hwtstamp_get(struct net_device *dev, + struct kernel_hwtstamp_config *cfg) { return -EOPNOTSUPP; } -static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) +int cpsw_hwtstamp_set(struct net_device *dev, + struct kernel_hwtstamp_config *cfg, + struct netlink_ext_ack *extack) { return -EOPNOTSUPP; } #endif /*CONFIG_TI_CPTS*/ -int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd) -{ - struct cpsw_priv *priv = netdev_priv(dev); - struct cpsw_common *cpsw = priv->cpsw; - int slave_no = cpsw_slave_index(cpsw, priv); - struct phy_device *phy; - - if (!netif_running(dev)) - return -EINVAL; - - phy = cpsw->slaves[slave_no].phy; - - if (!phy_has_hwtstamp(phy)) { - switch (cmd) { - case SIOCSHWTSTAMP: - return cpsw_hwtstamp_set(dev, req); - case SIOCGHWTSTAMP: - return cpsw_hwtstamp_get(dev, req); - } - } - - if (phy) - return phy_mii_ioctl(phy, req, cmd); - - return -EOPNOTSUPP; -} - int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate) { struct cpsw_priv *priv = netdev_priv(ndev); diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h index f2fc55d9295d..91add8925e23 100644 --- a/drivers/net/ethernet/ti/cpsw_priv.h +++ b/drivers/net/ethernet/ti/cpsw_priv.h @@ -461,7 +461,6 @@ void soft_reset(const char *module, void __iomem *reg); void cpsw_set_slave_mac(struct cpsw_slave *slave, struct cpsw_priv *priv); void cpsw_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue); int cpsw_need_resplit(struct cpsw_common *cpsw); -int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd); int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate); int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type, void *type_data); @@ -469,6 +468,11 @@ bool cpsw_shp_is_off(struct cpsw_priv *priv); void cpsw_cbs_resume(struct cpsw_slave *slave, struct cpsw_priv *priv); void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv); void cpsw_qos_clsflower_resume(struct cpsw_priv *priv); 
+int cpsw_hwtstamp_get(struct net_device *dev, + struct kernel_hwtstamp_config *cfg); +int cpsw_hwtstamp_set(struct net_device *dev, + struct kernel_hwtstamp_config *cfg, + struct netlink_ext_ack *extack); /* ethtool */ u32 cpsw_get_msglevel(struct net_device *ndev); diff --git a/drivers/net/ethernet/ti/icssg/icssg_common.c b/drivers/net/ethernet/ti/icssg/icssg_common.c index d88a0180294e..5b8fdb882172 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_common.c +++ b/drivers/net/ethernet/ti/icssg/icssg_common.c @@ -1329,10 +1329,28 @@ void icssg_ndo_get_stats64(struct net_device *ndev, stats->rx_over_errors = emac_get_stat_by_name(emac, "rx_over_errors"); stats->multicast = emac_get_stat_by_name(emac, "rx_multicast_frames"); - stats->rx_errors = ndev->stats.rx_errors; - stats->rx_dropped = ndev->stats.rx_dropped; + stats->rx_errors = ndev->stats.rx_errors + + emac_get_stat_by_name(emac, "FW_RX_ERROR") + + emac_get_stat_by_name(emac, "FW_RX_EOF_SHORT_FRMERR") + + emac_get_stat_by_name(emac, "FW_RX_B0_DROP_EARLY_EOF") + + emac_get_stat_by_name(emac, "FW_RX_EXP_FRAG_Q_DROP") + + emac_get_stat_by_name(emac, "FW_RX_FIFO_OVERRUN"); + stats->rx_dropped = ndev->stats.rx_dropped + + emac_get_stat_by_name(emac, "FW_DROPPED_PKT") + + emac_get_stat_by_name(emac, "FW_INF_PORT_DISABLED") + + emac_get_stat_by_name(emac, "FW_INF_SAV") + + emac_get_stat_by_name(emac, "FW_INF_SA_DL") + + emac_get_stat_by_name(emac, "FW_INF_PORT_BLOCKED") + + emac_get_stat_by_name(emac, "FW_INF_DROP_TAGGED") + + emac_get_stat_by_name(emac, "FW_INF_DROP_PRIOTAGGED") + + emac_get_stat_by_name(emac, "FW_INF_DROP_NOTAG") + + emac_get_stat_by_name(emac, "FW_INF_DROP_NOTMEMBER"); stats->tx_errors = ndev->stats.tx_errors; - stats->tx_dropped = ndev->stats.tx_dropped; + stats->tx_dropped = ndev->stats.tx_dropped + + emac_get_stat_by_name(emac, "FW_RTU_PKT_DROP") + + emac_get_stat_by_name(emac, "FW_TX_DROPPED_PACKET") + + emac_get_stat_by_name(emac, "FW_TX_TS_DROPPED_PACKET") + + emac_get_stat_by_name(emac, "FW_TX_JUMBO_FRM_CUTOFF"); } EXPORT_SYMBOL_GPL(icssg_ndo_get_stats64); diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.h b/drivers/net/ethernet/ti/icssg/icssg_prueth.h index b6be4aa57a61..23c465f1ce7f 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_prueth.h +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.h @@ -54,7 +54,7 @@ #define ICSSG_MAX_RFLOWS 8 /* per slice */ -#define ICSSG_NUM_PA_STATS 4 +#define ICSSG_NUM_PA_STATS 32 #define ICSSG_NUM_MIIG_STATS 60 /* Number of ICSSG related stats */ #define ICSSG_NUM_STATS (ICSSG_NUM_MIIG_STATS + ICSSG_NUM_PA_STATS) diff --git a/drivers/net/ethernet/ti/icssg/icssg_stats.c b/drivers/net/ethernet/ti/icssg/icssg_stats.c index 6f0edae38ea2..e8241e998aa9 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_stats.c +++ b/drivers/net/ethernet/ti/icssg/icssg_stats.c @@ -11,7 +11,6 @@ #define ICSSG_TX_PACKET_OFFSET 0xA0 #define ICSSG_TX_BYTE_OFFSET 0xEC -#define ICSSG_FW_STATS_BASE 0x0248 static u32 stats_base[] = { 0x54c, /* Slice 0 stats start */ 0xb18, /* Slice 1 stats start */ @@ -46,9 +45,8 @@ void emac_update_hardware_stats(struct prueth_emac *emac) if (prueth->pa_stats) { for (i = 0; i < ARRAY_SIZE(icssg_all_pa_stats); i++) { - reg = ICSSG_FW_STATS_BASE + - icssg_all_pa_stats[i].offset * - PRUETH_NUM_MACS + slice * sizeof(u32); + reg = icssg_all_pa_stats[i].offset + + slice * sizeof(u32); regmap_read(prueth->pa_stats, reg, &val); emac->pa_stats[i] += val; } @@ -80,7 +78,7 @@ int emac_get_stat_by_name(struct prueth_emac *emac, char *stat_name) if (emac->prueth->pa_stats) { for (i = 0; i 
< ARRAY_SIZE(icssg_all_pa_stats); i++) { if (!strcmp(icssg_all_pa_stats[i].name, stat_name)) - return emac->pa_stats[icssg_all_pa_stats[i].offset / sizeof(u32)]; + return emac->pa_stats[i]; } } diff --git a/drivers/net/ethernet/ti/icssg/icssg_stats.h b/drivers/net/ethernet/ti/icssg/icssg_stats.h index e88b919f532c..5ec0b38e0c67 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_stats.h +++ b/drivers/net/ethernet/ti/icssg/icssg_stats.h @@ -155,24 +155,10 @@ static const struct icssg_miig_stats icssg_all_miig_stats[] = { ICSSG_MIIG_STATS(tx_bytes, true), }; -/** - * struct pa_stats_regs - ICSSG Firmware maintained PA Stats register - * @fw_rx_cnt: Number of valid packets sent by Rx PRU to Host on PSI - * @fw_tx_cnt: Number of valid packets copied by RTU0 to Tx queues - * @fw_tx_pre_overflow: Host Egress Q (Pre-emptible) Overflow Counter - * @fw_tx_exp_overflow: Host Egress Q (Express) Overflow Counter - */ -struct pa_stats_regs { - u32 fw_rx_cnt; - u32 fw_tx_cnt; - u32 fw_tx_pre_overflow; - u32 fw_tx_exp_overflow; -}; - -#define ICSSG_PA_STATS(field) \ -{ \ - #field, \ - offsetof(struct pa_stats_regs, field), \ +#define ICSSG_PA_STATS(field) \ +{ \ + #field, \ + field, \ } struct icssg_pa_stats { @@ -181,10 +167,38 @@ struct icssg_pa_stats { }; static const struct icssg_pa_stats icssg_all_pa_stats[] = { - ICSSG_PA_STATS(fw_rx_cnt), - ICSSG_PA_STATS(fw_tx_cnt), - ICSSG_PA_STATS(fw_tx_pre_overflow), - ICSSG_PA_STATS(fw_tx_exp_overflow), + ICSSG_PA_STATS(FW_RTU_PKT_DROP), + ICSSG_PA_STATS(FW_Q0_OVERFLOW), + ICSSG_PA_STATS(FW_Q1_OVERFLOW), + ICSSG_PA_STATS(FW_Q2_OVERFLOW), + ICSSG_PA_STATS(FW_Q3_OVERFLOW), + ICSSG_PA_STATS(FW_Q4_OVERFLOW), + ICSSG_PA_STATS(FW_Q5_OVERFLOW), + ICSSG_PA_STATS(FW_Q6_OVERFLOW), + ICSSG_PA_STATS(FW_Q7_OVERFLOW), + ICSSG_PA_STATS(FW_DROPPED_PKT), + ICSSG_PA_STATS(FW_RX_ERROR), + ICSSG_PA_STATS(FW_RX_DS_INVALID), + ICSSG_PA_STATS(FW_TX_DROPPED_PACKET), + ICSSG_PA_STATS(FW_TX_TS_DROPPED_PACKET), + ICSSG_PA_STATS(FW_INF_PORT_DISABLED), + ICSSG_PA_STATS(FW_INF_SAV), + ICSSG_PA_STATS(FW_INF_SA_DL), + ICSSG_PA_STATS(FW_INF_PORT_BLOCKED), + ICSSG_PA_STATS(FW_INF_DROP_TAGGED), + ICSSG_PA_STATS(FW_INF_DROP_PRIOTAGGED), + ICSSG_PA_STATS(FW_INF_DROP_NOTAG), + ICSSG_PA_STATS(FW_INF_DROP_NOTMEMBER), + ICSSG_PA_STATS(FW_RX_EOF_SHORT_FRMERR), + ICSSG_PA_STATS(FW_RX_B0_DROP_EARLY_EOF), + ICSSG_PA_STATS(FW_TX_JUMBO_FRM_CUTOFF), + ICSSG_PA_STATS(FW_RX_EXP_FRAG_Q_DROP), + ICSSG_PA_STATS(FW_RX_FIFO_OVERRUN), + ICSSG_PA_STATS(FW_CUT_THR_PKT), + ICSSG_PA_STATS(FW_HOST_RX_PKT_CNT), + ICSSG_PA_STATS(FW_HOST_TX_PKT_CNT), + ICSSG_PA_STATS(FW_HOST_EGRESS_Q_PRE_OVERFLOW), + ICSSG_PA_STATS(FW_HOST_EGRESS_Q_EXP_OVERFLOW), }; #endif /* __NET_TI_ICSSG_STATS_H */ diff --git a/drivers/net/ethernet/ti/icssg/icssg_switch_map.h b/drivers/net/ethernet/ti/icssg/icssg_switch_map.h index 424a7e945ea8..490a9cc06fb0 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_switch_map.h +++ b/drivers/net/ethernet/ti/icssg/icssg_switch_map.h @@ -231,4 +231,37 @@ /* Start of 32 bits PA_STAT counters */ #define PA_STAT_32b_START_OFFSET 0x0080 +#define FW_RTU_PKT_DROP 0x0088 +#define FW_Q0_OVERFLOW 0x0090 +#define FW_Q1_OVERFLOW 0x0098 +#define FW_Q2_OVERFLOW 0x00A0 +#define FW_Q3_OVERFLOW 0x00A8 +#define FW_Q4_OVERFLOW 0x00B0 +#define FW_Q5_OVERFLOW 0x00B8 +#define FW_Q6_OVERFLOW 0x00C0 +#define FW_Q7_OVERFLOW 0x00C8 +#define FW_DROPPED_PKT 0x00F8 +#define FW_RX_ERROR 0x0100 +#define FW_RX_DS_INVALID 0x0108 +#define FW_TX_DROPPED_PACKET 0x0110 +#define FW_TX_TS_DROPPED_PACKET 0x0118 +#define FW_INF_PORT_DISABLED 0x0120 
+#define FW_INF_SAV 0x0128 +#define FW_INF_SA_DL 0x0130 +#define FW_INF_PORT_BLOCKED 0x0138 +#define FW_INF_DROP_TAGGED 0x0140 +#define FW_INF_DROP_PRIOTAGGED 0x0148 +#define FW_INF_DROP_NOTAG 0x0150 +#define FW_INF_DROP_NOTMEMBER 0x0158 +#define FW_RX_EOF_SHORT_FRMERR 0x0188 +#define FW_RX_B0_DROP_EARLY_EOF 0x0190 +#define FW_TX_JUMBO_FRM_CUTOFF 0x0198 +#define FW_RX_EXP_FRAG_Q_DROP 0x01A0 +#define FW_RX_FIFO_OVERRUN 0x01A8 +#define FW_CUT_THR_PKT 0x01B0 +#define FW_HOST_RX_PKT_CNT 0x0248 +#define FW_HOST_TX_PKT_CNT 0x0250 +#define FW_HOST_EGRESS_Q_PRE_OVERFLOW 0x0258 +#define FW_HOST_EGRESS_Q_EXP_OVERFLOW 0x0260 + #endif /* __NET_TI_ICSSG_SWITCH_MAP_H */ diff --git a/drivers/net/ethernet/vertexcom/mse102x.c b/drivers/net/ethernet/vertexcom/mse102x.c index e4d993f31374..a75bca1243e3 100644 --- a/drivers/net/ethernet/vertexcom/mse102x.c +++ b/drivers/net/ethernet/vertexcom/mse102x.c @@ -8,6 +8,7 @@ #include <linux/if_vlan.h> #include <linux/interrupt.h> +#include <linux/irq.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/netdevice.h> @@ -16,6 +17,7 @@ #include <linux/cache.h> #include <linux/debugfs.h> #include <linux/seq_file.h> +#include <linux/string_choices.h> #include <linux/spi/spi.h> #include <linux/of_net.h> @@ -45,7 +47,6 @@ struct mse102x_stats { u64 xfer_err; - u64 invalid_cmd; u64 invalid_ctr; u64 invalid_dft; u64 invalid_len; @@ -56,7 +57,6 @@ struct mse102x_stats { static const char mse102x_gstrings_stats[][ETH_GSTRING_LEN] = { "SPI transfer errors", - "Invalid command", "Invalid CTR", "Invalid DFT", "Invalid frame length", @@ -85,6 +85,8 @@ struct mse102x_net_spi { struct spi_message spi_msg; struct spi_transfer spi_xfer; + bool valid_cmd_received; + #ifdef CONFIG_DEBUG_FS struct dentry *device_root; #endif @@ -98,16 +100,18 @@ static int mse102x_info_show(struct seq_file *s, void *what) { struct mse102x_net_spi *mses = s->private; - seq_printf(s, "TX ring size : %u\n", + seq_printf(s, "TX ring size : %u\n", skb_queue_len(&mses->mse102x.txq)); - seq_printf(s, "IRQ : %d\n", + seq_printf(s, "IRQ : %d\n", mses->spidev->irq); - seq_printf(s, "SPI effective speed : %lu\n", + seq_printf(s, "SPI effective speed : %lu\n", (unsigned long)mses->spi_xfer.effective_speed_hz); - seq_printf(s, "SPI mode : %x\n", + seq_printf(s, "SPI mode : %x\n", mses->spidev->mode); + seq_printf(s, "Received valid CMD once : %s\n", + str_yes_no(mses->valid_cmd_received)); return 0; } @@ -194,10 +198,10 @@ static int mse102x_rx_cmd_spi(struct mse102x_net *mse, u8 *rxb) } else if (*cmd != cpu_to_be16(DET_CMD)) { net_dbg_ratelimited("%s: Unexpected response (0x%04x)\n", __func__, *cmd); - mse->stats.invalid_cmd++; ret = -EIO; } else { memcpy(rxb, trx + 2, 2); + mses->valid_cmd_received = true; } return ret; @@ -306,7 +310,7 @@ static void mse102x_dump_packet(const char *msg, int len, const char *data) data, len, true); } -static void mse102x_rx_pkt_spi(struct mse102x_net *mse) +static irqreturn_t mse102x_rx_pkt_spi(struct mse102x_net *mse) { struct sk_buff *skb; unsigned int rxalign; @@ -315,31 +319,20 @@ static void mse102x_rx_pkt_spi(struct mse102x_net *mse) __be16 rx = 0; u16 cmd_resp; u8 *rxpkt; - int ret; mse102x_tx_cmd_spi(mse, CMD_CTR); - ret = mse102x_rx_cmd_spi(mse, (u8 *)&rx); - cmd_resp = be16_to_cpu(rx); - - if (ret || ((cmd_resp & CMD_MASK) != CMD_RTS)) { + if (mse102x_rx_cmd_spi(mse, (u8 *)&rx)) { usleep_range(50, 100); + return IRQ_NONE; + } - mse102x_tx_cmd_spi(mse, CMD_CTR); - ret = mse102x_rx_cmd_spi(mse, (u8 *)&rx); - if (ret) - return; - - cmd_resp = 
be16_to_cpu(rx); - if ((cmd_resp & CMD_MASK) != CMD_RTS) { - net_dbg_ratelimited("%s: Unexpected response (0x%04x)\n", - __func__, cmd_resp); - mse->stats.invalid_rts++; - drop = true; - goto drop; - } - - net_dbg_ratelimited("%s: Unexpected response to first CMD\n", - __func__); + cmd_resp = be16_to_cpu(rx); + if ((cmd_resp & CMD_MASK) != CMD_RTS) { + net_dbg_ratelimited("%s: Unexpected response (0x%04x)\n", + __func__, cmd_resp); + mse->stats.invalid_rts++; + drop = true; + goto drop; } rxlen = cmd_resp & LEN_MASK; @@ -360,7 +353,7 @@ drop: rxalign = ALIGN(rxlen + DET_SOF_LEN + DET_DFT_LEN, 4); skb = netdev_alloc_skb_ip_align(mse->ndev, rxalign); if (!skb) - return; + return IRQ_NONE; /* 2 bytes Start of frame (before ethernet header) * 2 bytes Data frame tail (after ethernet frame) @@ -370,7 +363,7 @@ drop: if (mse102x_rx_frame_spi(mse, rxpkt, rxlen, drop)) { mse->ndev->stats.rx_errors++; dev_kfree_skb(skb); - return; + return IRQ_HANDLED; } if (netif_msg_pktdata(mse)) @@ -381,6 +374,8 @@ drop: mse->ndev->stats.rx_packets++; mse->ndev->stats.rx_bytes += rxlen; + + return IRQ_HANDLED; } static int mse102x_tx_pkt_spi(struct mse102x_net *mse, struct sk_buff *txb, @@ -512,20 +507,36 @@ static irqreturn_t mse102x_irq(int irq, void *_mse) { struct mse102x_net *mse = _mse; struct mse102x_net_spi *mses = to_mse102x_spi(mse); + irqreturn_t ret; mutex_lock(&mses->lock); - mse102x_rx_pkt_spi(mse); + ret = mse102x_rx_pkt_spi(mse); mutex_unlock(&mses->lock); - return IRQ_HANDLED; + return ret; } static int mse102x_net_open(struct net_device *ndev) { + struct irq_data *irq_data = irq_get_irq_data(ndev->irq); struct mse102x_net *mse = netdev_priv(ndev); struct mse102x_net_spi *mses = to_mse102x_spi(mse); int ret; + if (!irq_data) { + netdev_err(ndev, "Invalid IRQ: %d\n", ndev->irq); + return -EINVAL; + } + + switch (irqd_get_trigger_type(irq_data)) { + case IRQ_TYPE_LEVEL_HIGH: + case IRQ_TYPE_LEVEL_LOW: + break; + default: + netdev_warn_once(ndev, "Only IRQ type level recommended, please update your device tree firmware.\n"); + break; + } + ret = request_threaded_irq(ndev->irq, NULL, mse102x_irq, IRQF_ONESHOT, ndev->name, mse); if (ret < 0) { @@ -543,7 +554,8 @@ static int mse102x_net_open(struct net_device *ndev) * So poll for possible packet(s) to re-arm the interrupt. */ mutex_lock(&mses->lock); - mse102x_rx_pkt_spi(mse); + if (mse102x_rx_pkt_spi(mse) == IRQ_NONE) + mse102x_rx_pkt_spi(mse); mutex_unlock(&mses->lock); netif_dbg(mse, ifup, ndev, "network device up\n"); diff --git a/drivers/net/ethernet/wangxun/Kconfig b/drivers/net/ethernet/wangxun/Kconfig index 47e3e8434b9e..e5fc942c28cc 100644 --- a/drivers/net/ethernet/wangxun/Kconfig +++ b/drivers/net/ethernet/wangxun/Kconfig @@ -40,7 +40,7 @@ config NGBE will be called ngbe. config TXGBE - tristate "Wangxun(R) 10GbE PCI Express adapters support" + tristate "Wangxun(R) 10/25/40GbE PCI Express adapters support" depends on PCI depends on COMMON_CLK depends on I2C_DESIGNWARE_PLATFORM @@ -55,7 +55,7 @@ config TXGBE select PCS_XPCS select LIBWX help - This driver supports Wangxun(R) 10GbE PCI Express family of + This driver supports Wangxun(R) 10/25/40GbE PCI Express family of adapters. 
More specific information on configuring the driver is in diff --git a/drivers/net/ethernet/wangxun/libwx/Makefile b/drivers/net/ethernet/wangxun/libwx/Makefile index e9f0f1f2309b..9b78b604a94e 100644 --- a/drivers/net/ethernet/wangxun/libwx/Makefile +++ b/drivers/net/ethernet/wangxun/libwx/Makefile @@ -4,4 +4,4 @@ obj-$(CONFIG_LIBWX) += libwx.o -libwx-objs := wx_hw.o wx_lib.o wx_ethtool.o wx_ptp.o +libwx-objs := wx_hw.o wx_lib.o wx_ethtool.o wx_ptp.o wx_mbx.o wx_sriov.o diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c index 43019ec9329c..c12a4cb951f6 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c @@ -219,7 +219,7 @@ int wx_nway_reset(struct net_device *netdev) { struct wx *wx = netdev_priv(netdev); - if (wx->mac.type == wx_mac_aml) + if (wx->mac.type == wx_mac_aml40) return -EOPNOTSUPP; return phylink_ethtool_nway_reset(wx->phylink); @@ -231,9 +231,6 @@ int wx_get_link_ksettings(struct net_device *netdev, { struct wx *wx = netdev_priv(netdev); - if (wx->mac.type == wx_mac_aml) - return -EOPNOTSUPP; - return phylink_ethtool_ksettings_get(wx->phylink, cmd); } EXPORT_SYMBOL(wx_get_link_ksettings); @@ -243,7 +240,7 @@ int wx_set_link_ksettings(struct net_device *netdev, { struct wx *wx = netdev_priv(netdev); - if (wx->mac.type == wx_mac_aml) + if (wx->mac.type == wx_mac_aml40) return -EOPNOTSUPP; return phylink_ethtool_ksettings_set(wx->phylink, cmd); @@ -255,7 +252,7 @@ void wx_get_pauseparam(struct net_device *netdev, { struct wx *wx = netdev_priv(netdev); - if (wx->mac.type == wx_mac_aml) + if (wx->mac.type == wx_mac_aml40) return; phylink_ethtool_get_pauseparam(wx->phylink, pause); @@ -267,7 +264,7 @@ int wx_set_pauseparam(struct net_device *netdev, { struct wx *wx = netdev_priv(netdev); - if (wx->mac.type == wx_mac_aml) + if (wx->mac.type == wx_mac_aml40) return -EOPNOTSUPP; return phylink_ethtool_set_pauseparam(wx->phylink, pause); @@ -345,6 +342,7 @@ int wx_set_coalesce(struct net_device *netdev, max_eitr = WX_SP_MAX_EITR; break; case wx_mac_aml: + case wx_mac_aml40: max_eitr = WX_AML_MAX_EITR; break; default: @@ -375,6 +373,7 @@ int wx_set_coalesce(struct net_device *netdev, switch (wx->mac.type) { case wx_mac_sp: case wx_mac_aml: + case wx_mac_aml40: tx_itr_param = WX_12K_ITR; break; default: @@ -413,15 +412,10 @@ static unsigned int wx_max_channels(struct wx *wx) max_combined = 1; } else { /* support up to max allowed queues with RSS */ - switch (wx->mac.type) { - case wx_mac_sp: - case wx_mac_aml: + if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) max_combined = 63; - break; - default: + else max_combined = 8; - break; - } } return max_combined; diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c index 490d34233d38..0f4be72116b8 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c @@ -10,6 +10,7 @@ #include "wx_type.h" #include "wx_lib.h" +#include "wx_sriov.h" #include "wx_hw.h" static int wx_phy_read_reg_mdi(struct mii_bus *bus, int phy_addr, int devnum, int regnum) @@ -112,15 +113,10 @@ static void wx_intr_disable(struct wx *wx, u64 qmask) if (mask) wr32(wx, WX_PX_IMS(0), mask); - switch (wx->mac.type) { - case wx_mac_sp: - case wx_mac_aml: + if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) { mask = (qmask >> 32); if (mask) wr32(wx, WX_PX_IMS(1), mask); - break; - default: - break; } } @@ -132,15 +128,10 @@ void wx_intr_enable(struct wx *wx, u64 qmask) 
if (mask) wr32(wx, WX_PX_IMC(0), mask); - switch (wx->mac.type) { - case wx_mac_sp: - case wx_mac_aml: + if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) { mask = (qmask >> 32); if (mask) wr32(wx, WX_PX_IMC(1), mask); - break; - default: - break; } } EXPORT_SYMBOL(wx_intr_enable); @@ -704,6 +695,7 @@ void wx_init_eeprom_params(struct wx *wx) switch (wx->mac.type) { case wx_mac_sp: case wx_mac_aml: + case wx_mac_aml40: if (wx_read_ee_hostif(wx, WX_SW_REGION_PTR, &data)) { wx_err(wx, "NVM Read Error\n"); return; @@ -773,14 +765,8 @@ static int wx_set_rar(struct wx *wx, u32 index, u8 *addr, u64 pools, /* setup VMDq pool mapping */ wr32(wx, WX_PSR_MAC_SWC_VM_L, pools & 0xFFFFFFFF); - switch (wx->mac.type) { - case wx_mac_sp: - case wx_mac_aml: + if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) wr32(wx, WX_PSR_MAC_SWC_VM_H, pools >> 32); - break; - default: - break; - } /* HW expects these in little endian so we reverse the byte * order from network order (big endian) to little endian @@ -918,14 +904,9 @@ void wx_init_rx_addrs(struct wx *wx) wx_set_rar(wx, 0, wx->mac.addr, 0, WX_PSR_MAC_SWC_AD_H_AV); - switch (wx->mac.type) { - case wx_mac_sp: - case wx_mac_aml: + if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) { /* clear VMDq pool/queue selection for RAR 0 */ wx_clear_vmdq(wx, 0, WX_CLEAR_VMDQ_ALL); - break; - default: - break; } } @@ -970,11 +951,28 @@ static void wx_sync_mac_table(struct wx *wx) } } +static void wx_full_sync_mac_table(struct wx *wx) +{ + int i; + + for (i = 0; i < wx->mac.num_rar_entries; i++) { + if (wx->mac_table[i].state & WX_MAC_STATE_IN_USE) { + wx_set_rar(wx, i, + wx->mac_table[i].addr, + wx->mac_table[i].pools, + WX_PSR_MAC_SWC_AD_H_AV); + } else { + wx_clear_rar(wx, i); + } + wx->mac_table[i].state &= ~(WX_MAC_STATE_MODIFIED); + } +} + /* this function destroys the first RAR entry */ void wx_mac_set_default_filter(struct wx *wx, u8 *addr) { memcpy(&wx->mac_table[0].addr, addr, ETH_ALEN); - wx->mac_table[0].pools = 1ULL; + wx->mac_table[0].pools = BIT(VMDQ_P(0)); wx->mac_table[0].state = (WX_MAC_STATE_DEFAULT | WX_MAC_STATE_IN_USE); wx_set_rar(wx, 0, wx->mac_table[0].addr, wx->mac_table[0].pools, @@ -999,7 +997,7 @@ void wx_flush_sw_mac_table(struct wx *wx) } EXPORT_SYMBOL(wx_flush_sw_mac_table); -static int wx_add_mac_filter(struct wx *wx, u8 *addr, u16 pool) +int wx_add_mac_filter(struct wx *wx, u8 *addr, u16 pool) { u32 i; @@ -1030,7 +1028,7 @@ static int wx_add_mac_filter(struct wx *wx, u8 *addr, u16 pool) return -ENOMEM; } -static int wx_del_mac_filter(struct wx *wx, u8 *addr, u16 pool) +int wx_del_mac_filter(struct wx *wx, u8 *addr, u16 pool) { u32 i; @@ -1212,6 +1210,35 @@ static void wx_update_mc_addr_list(struct wx *wx, struct net_device *netdev) wx_dbg(wx, "Update mc addr list Complete\n"); } +static void wx_restore_vf_multicasts(struct wx *wx) +{ + u32 i, j, vector_bit, vector_reg; + struct vf_data_storage *vfinfo; + + for (i = 0; i < wx->num_vfs; i++) { + u32 vmolr = rd32(wx, WX_PSR_VM_L2CTL(i)); + + vfinfo = &wx->vfinfo[i]; + for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) { + wx->addr_ctrl.mta_in_use++; + vector_reg = WX_PSR_MC_TBL_REG(vfinfo->vf_mc_hashes[j]); + vector_bit = WX_PSR_MC_TBL_BIT(vfinfo->vf_mc_hashes[j]); + wr32m(wx, WX_PSR_MC_TBL(vector_reg), + BIT(vector_bit), BIT(vector_bit)); + /* errata 5: maintain a copy of the reg table conf */ + wx->mac.mta_shadow[vector_reg] |= BIT(vector_bit); + } + if (vfinfo->num_vf_mc_hashes) + vmolr |= WX_PSR_VM_L2CTL_ROMPE; + else + vmolr &= ~WX_PSR_VM_L2CTL_ROMPE; + wr32(wx, WX_PSR_VM_L2CTL(i), vmolr); + } + + 
/* Restore any VF macvlans */ + wx_full_sync_mac_table(wx); +} + /** * wx_write_mc_addr_list - write multicast addresses to MTA * @netdev: network interface device structure @@ -1229,6 +1256,9 @@ static int wx_write_mc_addr_list(struct net_device *netdev) wx_update_mc_addr_list(wx, netdev); + if (test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags)) + wx_restore_vf_multicasts(wx); + return netdev_mc_count(netdev); } @@ -1249,7 +1279,7 @@ int wx_set_mac(struct net_device *netdev, void *p) if (retval) return retval; - wx_del_mac_filter(wx, wx->mac.addr, 0); + wx_del_mac_filter(wx, wx->mac.addr, VMDQ_P(0)); eth_hw_addr_set(netdev, addr->sa_data); memcpy(wx->mac.addr, addr->sa_data, netdev->addr_len); @@ -1351,6 +1381,10 @@ static int wx_hpbthresh(struct wx *wx) /* Calculate delay value for device */ dv_id = WX_DV(link, tc); + /* Loopback switch introduces additional latency */ + if (test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags)) + dv_id += WX_B2BT(tc); + /* Delay value is calculated in bit times convert to KB */ kb = WX_BT2KB(dv_id); rx_pba = rd32(wx, WX_RDB_PB_SZ(0)) >> WX_RDB_PB_SZ_SHIFT; @@ -1406,12 +1440,107 @@ static void wx_pbthresh_setup(struct wx *wx) wx->fc.low_water = 0; } +static void wx_set_ethertype_anti_spoofing(struct wx *wx, bool enable, int vf) +{ + u32 pfvfspoof, reg_offset, vf_shift; + + vf_shift = WX_VF_IND_SHIFT(vf); + reg_offset = WX_VF_REG_OFFSET(vf); + + pfvfspoof = rd32(wx, WX_TDM_ETYPE_AS(reg_offset)); + if (enable) + pfvfspoof |= BIT(vf_shift); + else + pfvfspoof &= ~BIT(vf_shift); + wr32(wx, WX_TDM_ETYPE_AS(reg_offset), pfvfspoof); +} + +int wx_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting) +{ + u32 index = WX_VF_REG_OFFSET(vf), vf_bit = WX_VF_IND_SHIFT(vf); + struct wx *wx = netdev_priv(netdev); + u32 regval; + + if (vf >= wx->num_vfs) + return -EINVAL; + + wx->vfinfo[vf].spoofchk_enabled = setting; + + regval = (setting << vf_bit); + wr32m(wx, WX_TDM_MAC_AS(index), regval | BIT(vf_bit), regval); + + if (wx->vfinfo[vf].vlan_count) + wr32m(wx, WX_TDM_VLAN_AS(index), regval | BIT(vf_bit), regval); + + return 0; +} + +static void wx_configure_virtualization(struct wx *wx) +{ + u16 pool = wx->num_rx_pools; + u32 reg_offset, vf_shift; + u32 i; + + if (!test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags)) + return; + + wr32m(wx, WX_PSR_VM_CTL, + WX_PSR_VM_CTL_POOL_MASK | WX_PSR_VM_CTL_REPLEN, + FIELD_PREP(WX_PSR_VM_CTL_POOL_MASK, VMDQ_P(0)) | + WX_PSR_VM_CTL_REPLEN); + while (pool--) + wr32m(wx, WX_PSR_VM_L2CTL(pool), + WX_PSR_VM_L2CTL_AUPE, WX_PSR_VM_L2CTL_AUPE); + + if (!test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) { + vf_shift = BIT(VMDQ_P(0)); + /* Enable only the PF pools for Tx/Rx */ + wr32(wx, WX_RDM_VF_RE(0), vf_shift); + wr32(wx, WX_TDM_VF_TE(0), vf_shift); + } else { + vf_shift = WX_VF_IND_SHIFT(VMDQ_P(0)); + reg_offset = WX_VF_REG_OFFSET(VMDQ_P(0)); + + /* Enable only the PF pools for Tx/Rx */ + wr32(wx, WX_RDM_VF_RE(reg_offset), GENMASK(31, vf_shift)); + wr32(wx, WX_RDM_VF_RE(reg_offset ^ 1), reg_offset - 1); + wr32(wx, WX_TDM_VF_TE(reg_offset), GENMASK(31, vf_shift)); + wr32(wx, WX_TDM_VF_TE(reg_offset ^ 1), reg_offset - 1); + } + + /* clear VLAN promisc flag so VFTA will be updated if necessary */ + clear_bit(WX_FLAG_VLAN_PROMISC, wx->flags); + + for (i = 0; i < wx->num_vfs; i++) { + if (!wx->vfinfo[i].spoofchk_enabled) + wx_set_vf_spoofchk(wx->netdev, i, false); + /* enable ethertype anti spoofing if hw supports it */ + wx_set_ethertype_anti_spoofing(wx, true, i); + } +} + static void wx_configure_port(struct wx *wx) { u32 value, i; - value = 
WX_CFG_PORT_CTL_D_VLAN | WX_CFG_PORT_CTL_QINQ; + if (!test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) { + value = (wx->num_vfs == 0) ? + WX_CFG_PORT_CTL_NUM_VT_NONE : + WX_CFG_PORT_CTL_NUM_VT_8; + } else { + if (test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags)) { + if (wx->ring_feature[RING_F_RSS].indices == 4) + value = WX_CFG_PORT_CTL_NUM_VT_32; + else + value = WX_CFG_PORT_CTL_NUM_VT_64; + } else { + value = 0; + } + } + + value |= WX_CFG_PORT_CTL_D_VLAN | WX_CFG_PORT_CTL_QINQ; wr32m(wx, WX_CFG_PORT_CTL, + WX_CFG_PORT_CTL_NUM_VT_MASK | WX_CFG_PORT_CTL_D_VLAN | WX_CFG_PORT_CTL_QINQ, value); @@ -1472,6 +1601,83 @@ static void wx_vlan_strip_control(struct wx *wx, bool enable) } } +static void wx_vlan_promisc_enable(struct wx *wx) +{ + u32 vlnctrl, i, vind, bits, reg_idx; + + vlnctrl = rd32(wx, WX_PSR_VLAN_CTL); + if (test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags)) { + /* we need to keep the VLAN filter on in SRIOV */ + vlnctrl |= WX_PSR_VLAN_CTL_VFE; + wr32(wx, WX_PSR_VLAN_CTL, vlnctrl); + } else { + vlnctrl &= ~WX_PSR_VLAN_CTL_VFE; + wr32(wx, WX_PSR_VLAN_CTL, vlnctrl); + return; + } + /* We are already in VLAN promisc, nothing to do */ + if (test_bit(WX_FLAG_VLAN_PROMISC, wx->flags)) + return; + /* Set flag so we don't redo unnecessary work */ + set_bit(WX_FLAG_VLAN_PROMISC, wx->flags); + /* Add PF to all active pools */ + for (i = WX_PSR_VLAN_SWC_ENTRIES; --i;) { + wr32(wx, WX_PSR_VLAN_SWC_IDX, i); + vind = WX_VF_IND_SHIFT(VMDQ_P(0)); + reg_idx = WX_VF_REG_OFFSET(VMDQ_P(0)); + bits = rd32(wx, WX_PSR_VLAN_SWC_VM(reg_idx)); + bits |= BIT(vind); + wr32(wx, WX_PSR_VLAN_SWC_VM(reg_idx), bits); + } + /* Set all bits in the VLAN filter table array */ + for (i = 0; i < wx->mac.vft_size; i++) + wr32(wx, WX_PSR_VLAN_TBL(i), U32_MAX); +} + +static void wx_scrub_vfta(struct wx *wx) +{ + u32 i, vid, bits, vfta, vind, vlvf, reg_idx; + + for (i = WX_PSR_VLAN_SWC_ENTRIES; --i;) { + wr32(wx, WX_PSR_VLAN_SWC_IDX, i); + vlvf = rd32(wx, WX_PSR_VLAN_SWC_IDX); + /* pull VLAN ID from VLVF */ + vid = vlvf & ~WX_PSR_VLAN_SWC_VIEN; + if (vlvf & WX_PSR_VLAN_SWC_VIEN) { + /* if PF is part of this then continue */ + if (test_bit(vid, wx->active_vlans)) + continue; + } + /* remove PF from the pool */ + vind = WX_VF_IND_SHIFT(VMDQ_P(0)); + reg_idx = WX_VF_REG_OFFSET(VMDQ_P(0)); + bits = rd32(wx, WX_PSR_VLAN_SWC_VM(reg_idx)); + bits &= ~BIT(vind); + wr32(wx, WX_PSR_VLAN_SWC_VM(reg_idx), bits); + } + /* extract values from vft_shadow and write back to VFTA */ + for (i = 0; i < wx->mac.vft_size; i++) { + vfta = wx->mac.vft_shadow[i]; + wr32(wx, WX_PSR_VLAN_TBL(i), vfta); + } +} + +static void wx_vlan_promisc_disable(struct wx *wx) +{ + u32 vlnctrl; + + /* configure vlan filtering */ + vlnctrl = rd32(wx, WX_PSR_VLAN_CTL); + vlnctrl |= WX_PSR_VLAN_CTL_VFE; + wr32(wx, WX_PSR_VLAN_CTL, vlnctrl); + /* We are not in VLAN promisc, nothing to do */ + if (!test_bit(WX_FLAG_VLAN_PROMISC, wx->flags)) + return; + /* Set flag so we don't redo unnecessary work */ + clear_bit(WX_FLAG_VLAN_PROMISC, wx->flags); + wx_scrub_vfta(wx); +} + void wx_set_rx_mode(struct net_device *netdev) { struct wx *wx = netdev_priv(netdev); @@ -1484,7 +1690,7 @@ void wx_set_rx_mode(struct net_device *netdev) /* Check for Promiscuous and All Multicast modes */ fctrl = rd32(wx, WX_PSR_CTL); fctrl &= ~(WX_PSR_CTL_UPE | WX_PSR_CTL_MPE); - vmolr = rd32(wx, WX_PSR_VM_L2CTL(0)); + vmolr = rd32(wx, WX_PSR_VM_L2CTL(VMDQ_P(0))); vmolr &= ~(WX_PSR_VM_L2CTL_UPE | WX_PSR_VM_L2CTL_MPE | WX_PSR_VM_L2CTL_ROPE | @@ -1505,7 +1711,10 @@ void wx_set_rx_mode(struct net_device *netdev) fctrl |= 
WX_PSR_CTL_UPE | WX_PSR_CTL_MPE; /* pf don't want packets routing to vf, so clear UPE */ vmolr |= WX_PSR_VM_L2CTL_MPE; - vlnctrl &= ~WX_PSR_VLAN_CTL_VFE; + if (test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags) && + test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags)) + vlnctrl |= WX_PSR_VLAN_CTL_VFE; + features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; } if (netdev->flags & IFF_ALLMULTI) { @@ -1528,7 +1737,7 @@ void wx_set_rx_mode(struct net_device *netdev) * sufficient space to store all the addresses then enable * unicast promiscuous mode */ - count = wx_write_uc_addr_list(netdev, 0); + count = wx_write_uc_addr_list(netdev, VMDQ_P(0)); if (count < 0) { vmolr &= ~WX_PSR_VM_L2CTL_ROPE; vmolr |= WX_PSR_VM_L2CTL_UPE; @@ -1546,7 +1755,7 @@ void wx_set_rx_mode(struct net_device *netdev) wr32(wx, WX_PSR_VLAN_CTL, vlnctrl); wr32(wx, WX_PSR_CTL, fctrl); - wr32(wx, WX_PSR_VM_L2CTL(0), vmolr); + wr32(wx, WX_PSR_VM_L2CTL(VMDQ_P(0)), vmolr); if ((features & NETIF_F_HW_VLAN_CTAG_RX) && (features & NETIF_F_HW_VLAN_STAG_RX)) @@ -1554,6 +1763,10 @@ void wx_set_rx_mode(struct net_device *netdev) else wx_vlan_strip_control(wx, false); + if (features & NETIF_F_HW_VLAN_CTAG_FILTER) + wx_vlan_promisc_disable(wx); + else + wx_vlan_promisc_enable(wx); } EXPORT_SYMBOL(wx_set_rx_mode); @@ -1803,6 +2016,13 @@ static void wx_setup_reta(struct wx *wx) u32 random_key_size = WX_RSS_KEY_SIZE / 4; u32 i, j; + if (test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags)) { + if (wx->mac.type == wx_mac_em) + rss_i = 1; + else + rss_i = rss_i < 4 ? 4 : rss_i; + } + /* Fill out hash function seeds */ for (i = 0; i < random_key_size; i++) wr32(wx, WX_RDB_RSSRK(i), wx->rss_key[i]); @@ -1820,10 +2040,42 @@ static void wx_setup_reta(struct wx *wx) wx_store_reta(wx); } +#define WX_RDB_RSS_PL_2 FIELD_PREP(GENMASK(31, 29), 1) +#define WX_RDB_RSS_PL_4 FIELD_PREP(GENMASK(31, 29), 2) +static void wx_setup_psrtype(struct wx *wx) +{ + int rss_i = wx->ring_feature[RING_F_RSS].indices; + u32 psrtype; + int pool; + + psrtype = WX_RDB_PL_CFG_L4HDR | + WX_RDB_PL_CFG_L3HDR | + WX_RDB_PL_CFG_L2HDR | + WX_RDB_PL_CFG_TUN_OUTL2HDR | + WX_RDB_PL_CFG_TUN_TUNHDR; + + if (!test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) { + for_each_set_bit(pool, &wx->fwd_bitmask, 8) + wr32(wx, WX_RDB_PL_CFG(VMDQ_P(pool)), psrtype); + } else { + if (rss_i > 3) + psrtype |= WX_RDB_RSS_PL_4; + else if (rss_i > 1) + psrtype |= WX_RDB_RSS_PL_2; + + for_each_set_bit(pool, &wx->fwd_bitmask, 32) + wr32(wx, WX_RDB_PL_CFG(VMDQ_P(pool)), psrtype); + } +} + static void wx_setup_mrqc(struct wx *wx) { u32 rss_field = 0; + /* VT, and RSS do not coexist at the same time */ + if (test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags)) + return; + /* Disable indicating checksum in descriptor, enables RSS hash */ wr32m(wx, WX_PSR_CTL, WX_PSR_CTL_PCSD, WX_PSR_CTL_PCSD); @@ -1853,16 +2105,11 @@ static void wx_setup_mrqc(struct wx *wx) **/ void wx_configure_rx(struct wx *wx) { - u32 psrtype, i; int ret; + u32 i; wx_disable_rx(wx); - - psrtype = WX_RDB_PL_CFG_L4HDR | - WX_RDB_PL_CFG_L3HDR | - WX_RDB_PL_CFG_L2HDR | - WX_RDB_PL_CFG_TUN_TUNHDR; - wr32(wx, WX_RDB_PL_CFG(0), psrtype); + wx_setup_psrtype(wx); /* enable hw crc stripping */ wr32m(wx, WX_RSC_CTL, WX_RSC_CTL_CRC_STRIP, WX_RSC_CTL_CRC_STRIP); @@ -1910,6 +2157,7 @@ void wx_configure(struct wx *wx) { wx_set_rxpba(wx); wx_pbthresh_setup(wx); + wx_configure_virtualization(wx); wx_configure_port(wx); wx_set_rx_mode(wx->netdev); @@ -2004,10 +2252,8 @@ int wx_stop_adapter(struct wx *wx) } EXPORT_SYMBOL(wx_stop_adapter); -void wx_reset_misc(struct wx *wx) +void wx_reset_mac(struct wx *wx) { - 
int i; - /* receive packets that size > 2048 */ wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_JE, WX_MAC_RX_CFG_JE); @@ -2019,6 +2265,14 @@ void wx_reset_misc(struct wx *wx) WX_MAC_RX_FLOW_CTRL_RFE, WX_MAC_RX_FLOW_CTRL_RFE); wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR); +} +EXPORT_SYMBOL(wx_reset_mac); + +void wx_reset_misc(struct wx *wx) +{ + int i; + + wx_reset_mac(wx); wr32m(wx, WX_MIS_RST_ST, WX_MIS_RST_ST_RST_INIT, 0x1E00); @@ -2268,7 +2522,7 @@ static int wx_set_vlvf(struct wx *wx, u32 vlan, u32 vind, bool vlan_on, * * Turn on/off specified VLAN in the VLAN filter table. **/ -static int wx_set_vfta(struct wx *wx, u32 vlan, u32 vind, bool vlan_on) +int wx_set_vfta(struct wx *wx, u32 vlan, u32 vind, bool vlan_on) { u32 bitindex, vfta, targetbit; bool vfta_changed = false; @@ -2524,7 +2778,8 @@ void wx_update_stats(struct wx *wx) hwstats->fdirmiss += rd32(wx, WX_RDB_FDIR_MISS); } - for (i = 0; i < wx->mac.max_rx_queues; i++) + for (i = wx->num_vfs * wx->num_rx_queues_per_pool; + i < wx->mac.max_rx_queues; i++) hwstats->qmprc += rd32(wx, WX_PX_MPRC(i)); } EXPORT_SYMBOL(wx_update_stats); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.h b/drivers/net/ethernet/wangxun/libwx/wx_hw.h index b883342bb576..26a56cba60b9 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_hw.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.h @@ -26,9 +26,12 @@ void wx_init_eeprom_params(struct wx *wx); void wx_get_mac_addr(struct wx *wx, u8 *mac_addr); void wx_init_rx_addrs(struct wx *wx); void wx_mac_set_default_filter(struct wx *wx, u8 *addr); +int wx_add_mac_filter(struct wx *wx, u8 *addr, u16 pool); +int wx_del_mac_filter(struct wx *wx, u8 *addr, u16 pool); void wx_flush_sw_mac_table(struct wx *wx); int wx_set_mac(struct net_device *netdev, void *p); void wx_disable_rx(struct wx *wx); +int wx_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting); int wx_disable_sec_rx_path(struct wx *wx); void wx_enable_sec_rx_path(struct wx *wx); void wx_set_rx_mode(struct net_device *netdev); @@ -39,9 +42,11 @@ void wx_configure(struct wx *wx); void wx_start_hw(struct wx *wx); int wx_disable_pcie_master(struct wx *wx); int wx_stop_adapter(struct wx *wx); +void wx_reset_mac(struct wx *wx); void wx_reset_misc(struct wx *wx); int wx_get_pcie_msix_counts(struct wx *wx, u16 *msix_count, u16 max_msix_count); int wx_sw_init(struct wx *wx); +int wx_set_vfta(struct wx *wx, u32 vlan, u32 vind, bool vlan_on); int wx_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid); int wx_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid); int wx_fc_enable(struct wx *wx, bool tx_pause, bool rx_pause); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c index e69eaa65e0de..5c747509d56b 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c @@ -5,6 +5,7 @@ #include <net/ip6_checksum.h> #include <net/page_pool/helpers.h> #include <net/inet_ecn.h> +#include <linux/workqueue.h> #include <linux/iopoll.h> #include <linux/sctp.h> #include <linux/pci.h> @@ -1620,6 +1621,65 @@ void wx_napi_disable_all(struct wx *wx) } EXPORT_SYMBOL(wx_napi_disable_all); +static bool wx_set_vmdq_queues(struct wx *wx) +{ + u16 vmdq_i = wx->ring_feature[RING_F_VMDQ].limit; + u16 rss_i = wx->ring_feature[RING_F_RSS].limit; + u16 rss_m = WX_RSS_DISABLED_MASK; + u16 vmdq_m = 0; + + /* only proceed if VMDq is enabled */ + if (!test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags)) + return false; + /* Add starting offset to total pool count */ + vmdq_i += 
wx->ring_feature[RING_F_VMDQ].offset; + + if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) { + /* double check we are limited to maximum pools */ + vmdq_i = min_t(u16, 64, vmdq_i); + + /* 64 pool mode with 2 queues per pool, or + * 16/32/64 pool mode with 1 queue per pool + */ + if (vmdq_i > 32 || rss_i < 4) { + vmdq_m = WX_VMDQ_2Q_MASK; + rss_m = WX_RSS_2Q_MASK; + rss_i = min_t(u16, rss_i, 2); + /* 32 pool mode with 4 queues per pool */ + } else { + vmdq_m = WX_VMDQ_4Q_MASK; + rss_m = WX_RSS_4Q_MASK; + rss_i = 4; + } + } else { + /* double check we are limited to maximum pools */ + vmdq_i = min_t(u16, 8, vmdq_i); + + /* when VMDQ on, disable RSS */ + rss_i = 1; + } + + /* remove the starting offset from the pool count */ + vmdq_i -= wx->ring_feature[RING_F_VMDQ].offset; + + /* save features for later use */ + wx->ring_feature[RING_F_VMDQ].indices = vmdq_i; + wx->ring_feature[RING_F_VMDQ].mask = vmdq_m; + + /* limit RSS based on user input and save for later use */ + wx->ring_feature[RING_F_RSS].indices = rss_i; + wx->ring_feature[RING_F_RSS].mask = rss_m; + + wx->queues_per_pool = rss_i; /* may be the same as num_rx_queues_per_pool */ + wx->num_rx_pools = vmdq_i; + wx->num_rx_queues_per_pool = rss_i; + + wx->num_rx_queues = vmdq_i * rss_i; + wx->num_tx_queues = vmdq_i * rss_i; + + return true; +} + /** * wx_set_rss_queues: Allocate queues for RSS * @wx: board private structure to initialize @@ -1634,6 +1694,10 @@ static void wx_set_rss_queues(struct wx *wx) /* set mask for 16 queue limit of RSS */ f = &wx->ring_feature[RING_F_RSS]; + if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) + f->mask = WX_RSS_64Q_MASK; + else + f->mask = WX_RSS_8Q_MASK; f->indices = f->limit; if (!(test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags))) @@ -1666,6 +1730,9 @@ static void wx_set_num_queues(struct wx *wx) wx->num_tx_queues = 1; wx->queues_per_pool = 1; + if (wx_set_vmdq_queues(wx)) + return; + wx_set_rss_queues(wx); } @@ -1746,6 +1813,10 @@ static int wx_set_interrupt_capability(struct wx *wx) if (ret == 0 || (ret == -ENOMEM)) return ret; + /* Disable VMDq support */ + dev_warn(&wx->pdev->dev, "Disabling VMDq support\n"); + clear_bit(WX_FLAG_VMDQ_ENABLED, wx->flags); + /* Disable RSS */ dev_warn(&wx->pdev->dev, "Disabling RSS support\n"); wx->ring_feature[RING_F_RSS].limit = 1; @@ -1772,6 +1843,49 @@ static int wx_set_interrupt_capability(struct wx *wx) return 0; } +static bool wx_cache_ring_vmdq(struct wx *wx) +{ + struct wx_ring_feature *vmdq = &wx->ring_feature[RING_F_VMDQ]; + struct wx_ring_feature *rss = &wx->ring_feature[RING_F_RSS]; + u16 reg_idx; + int i; + + /* only proceed if VMDq is enabled */ + if (!test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags)) + return false; + + if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) { + /* start at VMDq register offset for SR-IOV enabled setups */ + reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); + for (i = 0; i < wx->num_rx_queues; i++, reg_idx++) { + /* If we are greater than indices move to next pool */ + if ((reg_idx & ~vmdq->mask) >= rss->indices) + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); + wx->rx_ring[i]->reg_idx = reg_idx; + } + reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); + for (i = 0; i < wx->num_tx_queues; i++, reg_idx++) { + /* If we are greater than indices move to next pool */ + if ((reg_idx & rss->mask) >= rss->indices) + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); + wx->tx_ring[i]->reg_idx = reg_idx; + } + } else { + /* start at VMDq register offset for SR-IOV enabled setups */ + reg_idx = vmdq->offset; + for (i = 0; i < wx->num_rx_queues; i++) 
+ /* If we are greater than indices move to next pool */ + wx->rx_ring[i]->reg_idx = reg_idx + i; + + reg_idx = vmdq->offset; + for (i = 0; i < wx->num_tx_queues; i++) + /* If we are greater than indices move to next pool */ + wx->tx_ring[i]->reg_idx = reg_idx + i; + } + + return true; +} + /** * wx_cache_ring_rss - Descriptor ring to register mapping for RSS * @wx: board private structure to initialize @@ -1783,6 +1897,9 @@ static void wx_cache_ring_rss(struct wx *wx) { u16 i; + if (wx_cache_ring_vmdq(wx)) + return; + for (i = 0; i < wx->num_rx_queues; i++) wx->rx_ring[i]->reg_idx = i; @@ -1843,6 +1960,7 @@ static int wx_alloc_q_vector(struct wx *wx, switch (wx->mac.type) { case wx_mac_sp: case wx_mac_aml: + case wx_mac_aml40: default_itr = WX_12K_ITR; break; default: @@ -2181,7 +2299,8 @@ static void wx_set_ivar(struct wx *wx, s8 direction, wr32(wx, WX_PX_MISC_IVAR, ivar); } else { /* tx or rx causes */ - msix_vector += 1; /* offset for queue vectors */ + if (!(wx->mac.type == wx_mac_em && wx->num_vfs == 7)) + msix_vector += 1; /* offset for queue vectors */ msix_vector |= WX_PX_IVAR_ALLOC_VAL; index = ((16 * (queue & 1)) + (8 * direction)); ivar = rd32(wx, WX_PX_IVAR(queue >> 1)); @@ -2210,6 +2329,7 @@ void wx_write_eitr(struct wx_q_vector *q_vector) itr_reg = q_vector->itr & WX_SP_MAX_EITR; break; case wx_mac_aml: + case wx_mac_aml40: itr_reg = (q_vector->itr >> 3) & WX_AML_MAX_EITR; break; default: @@ -2233,10 +2353,17 @@ void wx_configure_vectors(struct wx *wx) { struct pci_dev *pdev = wx->pdev; u32 eitrsel = 0; - u16 v_idx; + u16 v_idx, i; if (pdev->msix_enabled) { /* Populate MSIX to EITR Select */ + if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) { + if (wx->num_vfs >= 32) + eitrsel = BIT(wx->num_vfs % 32) - 1; + } else { + for (i = 0; i < wx->num_vfs; i++) + eitrsel |= BIT(i); + } wr32(wx, WX_PX_ITRSEL, eitrsel); /* use EIAM to auto-mask when MSI-X interrupt is asserted * this saves a register write for every interrupt @@ -2876,6 +3003,33 @@ netdev_features_t wx_fix_features(struct net_device *netdev, } EXPORT_SYMBOL(wx_fix_features); +#define WX_MAX_TUNNEL_HDR_LEN 80 +netdev_features_t wx_features_check(struct sk_buff *skb, + struct net_device *netdev, + netdev_features_t features) +{ + struct wx *wx = netdev_priv(netdev); + + if (!skb->encapsulation) + return features; + + if (wx->mac.type == wx_mac_em) + return features & ~NETIF_F_CSUM_MASK; + + if (unlikely(skb_inner_mac_header(skb) - skb_transport_header(skb) > + WX_MAX_TUNNEL_HDR_LEN)) + return features & ~NETIF_F_CSUM_MASK; + + if (skb->inner_protocol_type == ENCAP_TYPE_ETHER && + skb->inner_protocol != htons(ETH_P_IP) && + skb->inner_protocol != htons(ETH_P_IPV6) && + skb->inner_protocol != htons(ETH_P_TEB)) + return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); + + return features; +} +EXPORT_SYMBOL(wx_features_check); + void wx_set_ring(struct wx *wx, u32 new_tx_count, u32 new_rx_count, struct wx_ring *temp_ring) { @@ -2942,5 +3096,35 @@ void wx_set_ring(struct wx *wx, u32 new_tx_count, } EXPORT_SYMBOL(wx_set_ring); +void wx_service_event_schedule(struct wx *wx) +{ + if (!test_and_set_bit(WX_STATE_SERVICE_SCHED, wx->state)) + queue_work(system_power_efficient_wq, &wx->service_task); +} +EXPORT_SYMBOL(wx_service_event_schedule); + +void wx_service_event_complete(struct wx *wx) +{ + if (WARN_ON(!test_bit(WX_STATE_SERVICE_SCHED, wx->state))) + return; + + /* flush memory to make sure state is correct before next watchdog */ + smp_mb__before_atomic(); + clear_bit(WX_STATE_SERVICE_SCHED, wx->state); +} 
+EXPORT_SYMBOL(wx_service_event_complete); + +void wx_service_timer(struct timer_list *t) +{ + struct wx *wx = from_timer(wx, t, service_timer); + unsigned long next_event_offset = HZ * 2; + + /* Reset the timer */ + mod_timer(&wx->service_timer, next_event_offset + jiffies); + + wx_service_event_schedule(wx); +} +EXPORT_SYMBOL(wx_service_timer); + MODULE_DESCRIPTION("Common library for Wangxun(R) Ethernet drivers."); MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.h b/drivers/net/ethernet/wangxun/libwx/wx_lib.h index fdeb0c315b75..aed6ea8cf0d6 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_lib.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.h @@ -33,7 +33,13 @@ void wx_get_stats64(struct net_device *netdev, int wx_set_features(struct net_device *netdev, netdev_features_t features); netdev_features_t wx_fix_features(struct net_device *netdev, netdev_features_t features); +netdev_features_t wx_features_check(struct sk_buff *skb, + struct net_device *netdev, + netdev_features_t features); void wx_set_ring(struct wx *wx, u32 new_tx_count, u32 new_rx_count, struct wx_ring *temp_ring); +void wx_service_event_schedule(struct wx *wx); +void wx_service_event_complete(struct wx *wx); +void wx_service_timer(struct timer_list *t); -#endif /* _NGBE_LIB_H_ */ +#endif /* _WX_LIB_H_ */ diff --git a/drivers/net/ethernet/wangxun/libwx/wx_mbx.c b/drivers/net/ethernet/wangxun/libwx/wx_mbx.c new file mode 100644 index 000000000000..73af5f11c3bd --- /dev/null +++ b/drivers/net/ethernet/wangxun/libwx/wx_mbx.c @@ -0,0 +1,176 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */ + +#include <linux/pci.h> +#include "wx_type.h" +#include "wx_mbx.h" + +/** + * wx_obtain_mbx_lock_pf - obtain mailbox lock + * @wx: pointer to the HW structure + * @vf: the VF index + * + * Return: return 0 on success and -EBUSY on failure + **/ +static int wx_obtain_mbx_lock_pf(struct wx *wx, u16 vf) +{ + int count = 5; + u32 mailbox; + + while (count--) { + /* Take ownership of the buffer */ + wr32(wx, WX_PXMAILBOX(vf), WX_PXMAILBOX_PFU); + + /* reserve mailbox for vf use */ + mailbox = rd32(wx, WX_PXMAILBOX(vf)); + if (mailbox & WX_PXMAILBOX_PFU) + return 0; + else if (count) + udelay(10); + } + wx_err(wx, "Failed to obtain mailbox lock for PF%d", vf); + + return -EBUSY; +} + +static int wx_check_for_bit_pf(struct wx *wx, u32 mask, int index) +{ + u32 mbvficr = rd32(wx, WX_MBVFICR(index)); + + if (!(mbvficr & mask)) + return -EBUSY; + wr32(wx, WX_MBVFICR(index), mask); + + return 0; +} + +/** + * wx_check_for_ack_pf - checks to see if the VF has acked + * @wx: pointer to the HW structure + * @vf: the VF index + * + * Return: return 0 if the VF has set the status bit or else -EBUSY + **/ +int wx_check_for_ack_pf(struct wx *wx, u16 vf) +{ + u32 index = vf / 16, vf_bit = vf % 16; + + return wx_check_for_bit_pf(wx, + FIELD_PREP(WX_MBVFICR_VFACK_MASK, + BIT(vf_bit)), + index); +} + +/** + * wx_check_for_msg_pf - checks to see if the VF has sent mail + * @wx: pointer to the HW structure + * @vf: the VF index + * + * Return: return 0 if the VF has got req bit or else -EBUSY + **/ +int wx_check_for_msg_pf(struct wx *wx, u16 vf) +{ + u32 index = vf / 16, vf_bit = vf % 16; + + return wx_check_for_bit_pf(wx, + FIELD_PREP(WX_MBVFICR_VFREQ_MASK, + BIT(vf_bit)), + index); +} + +/** + * wx_write_mbx_pf - Places a message in the mailbox + * @wx: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf: the VF index + * + * 
Return: return 0 on success and -EINVAL/-EBUSY on failure + **/ +int wx_write_mbx_pf(struct wx *wx, u32 *msg, u16 size, u16 vf) +{ + struct wx_mbx_info *mbx = &wx->mbx; + int ret, i; + + /* mbx->size is up to 15 */ + if (size > mbx->size) { + wx_err(wx, "Invalid mailbox message size %d", size); + return -EINVAL; + } + + /* lock the mailbox to prevent pf/vf race condition */ + ret = wx_obtain_mbx_lock_pf(wx, vf); + if (ret) + return ret; + + /* flush msg and acks as we are overwriting the message buffer */ + wx_check_for_msg_pf(wx, vf); + wx_check_for_ack_pf(wx, vf); + + /* copy the caller specified message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + wr32a(wx, WX_PXMBMEM(vf), i, msg[i]); + + /* Interrupt VF to tell it a message has been sent and release buffer */ + /* set mirrored mailbox flags */ + wr32a(wx, WX_PXMBMEM(vf), WX_VXMAILBOX_SIZE, WX_PXMAILBOX_STS); + wr32(wx, WX_PXMAILBOX(vf), WX_PXMAILBOX_STS); + + return 0; +} + +/** + * wx_read_mbx_pf - Read a message from the mailbox + * @wx: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf: the VF index + * + * Return: return 0 on success and -EBUSY on failure + **/ +int wx_read_mbx_pf(struct wx *wx, u32 *msg, u16 size, u16 vf) +{ + struct wx_mbx_info *mbx = &wx->mbx; + int ret; + u16 i; + + /* limit read to size of mailbox and mbx->size is up to 15 */ + if (size > mbx->size) + size = mbx->size; + + /* lock the mailbox to prevent pf/vf race condition */ + ret = wx_obtain_mbx_lock_pf(wx, vf); + if (ret) + return ret; + + for (i = 0; i < size; i++) + msg[i] = rd32a(wx, WX_PXMBMEM(vf), i); + + /* Acknowledge the message and release buffer */ + /* set mirrored mailbox flags */ + wr32a(wx, WX_PXMBMEM(vf), WX_VXMAILBOX_SIZE, WX_PXMAILBOX_ACK); + wr32(wx, WX_PXMAILBOX(vf), WX_PXMAILBOX_ACK); + + return 0; +} + +/** + * wx_check_for_rst_pf - checks to see if the VF has reset + * @wx: pointer to the HW structure + * @vf: the VF index + * + * Return: return 0 on success and -EBUSY on failure + **/ +int wx_check_for_rst_pf(struct wx *wx, u16 vf) +{ + u32 reg_offset = WX_VF_REG_OFFSET(vf); + u32 vf_shift = WX_VF_IND_SHIFT(vf); + u32 vflre = 0; + + vflre = rd32(wx, WX_VFLRE(reg_offset)); + if (!(vflre & BIT(vf_shift))) + return -EBUSY; + wr32(wx, WX_VFLREC(reg_offset), BIT(vf_shift)); + + return 0; +} diff --git a/drivers/net/ethernet/wangxun/libwx/wx_mbx.h b/drivers/net/ethernet/wangxun/libwx/wx_mbx.h new file mode 100644 index 000000000000..05aae138dbc3 --- /dev/null +++ b/drivers/net/ethernet/wangxun/libwx/wx_mbx.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. 
*/ +#ifndef _WX_MBX_H_ +#define _WX_MBX_H_ + +#define WX_VXMAILBOX_SIZE 15 + +/* PF Registers */ +#define WX_PXMAILBOX(i) (0x600 + (4 * (i))) /* i=[0,63] */ +#define WX_PXMAILBOX_STS BIT(0) /* Initiate message send to VF */ +#define WX_PXMAILBOX_ACK BIT(1) /* Ack message recv'd from VF */ +#define WX_PXMAILBOX_PFU BIT(3) /* PF owns the mailbox buffer */ + +#define WX_PXMBMEM(i) (0x5000 + (64 * (i))) /* i=[0,63] */ + +#define WX_VFLRE(i) (0x4A0 + (4 * (i))) /* i=[0,1] */ +#define WX_VFLREC(i) (0x4A8 + (4 * (i))) /* i=[0,1] */ + +/* SR-IOV specific macros */ +#define WX_MBVFICR(i) (0x480 + (4 * (i))) /* i=[0,3] */ +#define WX_MBVFICR_VFREQ_MASK GENMASK(15, 0) +#define WX_MBVFICR_VFACK_MASK GENMASK(31, 16) + +#define WX_VT_MSGTYPE_ACK BIT(31) +#define WX_VT_MSGTYPE_NACK BIT(30) +#define WX_VT_MSGTYPE_CTS BIT(29) +#define WX_VT_MSGINFO_SHIFT 16 +#define WX_VT_MSGINFO_MASK GENMASK(23, 16) + +enum wx_pfvf_api_rev { + wx_mbox_api_null, + wx_mbox_api_13 = 4, /* API version 1.3 */ + wx_mbox_api_unknown, /* indicates that API version is not known */ +}; + +/* mailbox API */ +#define WX_VF_RESET 0x01 /* VF requests reset */ +#define WX_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ +#define WX_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ +#define WX_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ +#define WX_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ +#define WX_VF_SET_MACVLAN 0x06 /* VF requests PF unicast filter */ +#define WX_VF_API_NEGOTIATE 0x08 /* negotiate API version */ +#define WX_VF_GET_QUEUES 0x09 /* get queue configuration */ +#define WX_VF_GET_RETA 0x0a /* VF request for RETA */ +#define WX_VF_GET_RSS_KEY 0x0b /* get RSS key */ +#define WX_VF_UPDATE_XCAST_MODE 0x0c +#define WX_VF_GET_LINK_STATE 0x10 /* get vf link state */ +#define WX_VF_GET_FW_VERSION 0x11 /* get fw version */ + +#define WX_VF_BACKUP 0x8001 /* VF requests backup */ + +#define WX_PF_CONTROL_MSG BIT(8) /* PF control message */ +#define WX_PF_NOFITY_VF_LINK_STATUS 0x1 +#define WX_PF_NOFITY_VF_NET_NOT_RUNNING BIT(31) + +#define WX_VF_TX_QUEUES 1 /* number of Tx queues supported */ +#define WX_VF_RX_QUEUES 2 /* number of Rx queues supported */ +#define WX_VF_TRANS_VLAN 3 /* Indication of port vlan */ +#define WX_VF_DEF_QUEUE 4 /* Default queue offset */ + +#define WX_VF_PERMADDR_MSG_LEN 4 + +enum wxvf_xcast_modes { + WXVF_XCAST_MODE_NONE = 0, + WXVF_XCAST_MODE_MULTI, + WXVF_XCAST_MODE_ALLMULTI, + WXVF_XCAST_MODE_PROMISC, +}; + +int wx_write_mbx_pf(struct wx *wx, u32 *msg, u16 size, u16 vf); +int wx_read_mbx_pf(struct wx *wx, u32 *msg, u16 size, u16 vf); +int wx_check_for_rst_pf(struct wx *wx, u16 mbx_id); +int wx_check_for_msg_pf(struct wx *wx, u16 mbx_id); +int wx_check_for_ack_pf(struct wx *wx, u16 mbx_id); + +#endif /* _WX_MBX_H_ */ diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ptp.c b/drivers/net/ethernet/wangxun/libwx/wx_ptp.c index 07c015ba338f..2c39b879f977 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_ptp.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_ptp.c @@ -15,12 +15,14 @@ #define WX_INCVAL_100 0xA00000 #define WX_INCVAL_10 0xC7F380 #define WX_INCVAL_EM 0x2000000 +#define WX_INCVAL_AML 0xA00000 #define WX_INCVAL_SHIFT_10GB 20 #define WX_INCVAL_SHIFT_1GB 18 #define WX_INCVAL_SHIFT_100 15 #define WX_INCVAL_SHIFT_10 12 #define WX_INCVAL_SHIFT_EM 22 +#define WX_INCVAL_SHIFT_AML 21 #define WX_OVERFLOW_PERIOD (HZ * 30) #define WX_PTP_TX_TIMEOUT (HZ) @@ -504,15 +506,27 @@ static long wx_ptp_create_clock(struct wx *wx) wx->ptp_caps.gettimex64 = wx_ptp_gettimex64; 
wx->ptp_caps.settime64 = wx_ptp_settime64; wx->ptp_caps.do_aux_work = wx_ptp_do_aux_work; - if (wx->mac.type == wx_mac_em) { - wx->ptp_caps.max_adj = 500000000; + switch (wx->mac.type) { + case wx_mac_aml: + case wx_mac_aml40: + wx->ptp_caps.max_adj = 250000000; wx->ptp_caps.n_per_out = 1; wx->ptp_setup_sdp = wx_ptp_setup_sdp; wx->ptp_caps.enable = wx_ptp_feature_enable; - } else { + break; + case wx_mac_sp: wx->ptp_caps.max_adj = 250000000; wx->ptp_caps.n_per_out = 0; wx->ptp_setup_sdp = NULL; + break; + case wx_mac_em: + wx->ptp_caps.max_adj = 500000000; + wx->ptp_caps.n_per_out = 1; + wx->ptp_setup_sdp = wx_ptp_setup_sdp; + wx->ptp_caps.enable = wx_ptp_feature_enable; + break; + default: + return -EOPNOTSUPP; } wx->ptp_clock = ptp_clock_register(&wx->ptp_caps, &wx->pdev->dev); @@ -647,10 +661,18 @@ static u64 wx_ptp_read(const struct cyclecounter *hw_cc) static void wx_ptp_link_speed_adjust(struct wx *wx, u32 *shift, u32 *incval) { - if (wx->mac.type == wx_mac_em) { + switch (wx->mac.type) { + case wx_mac_aml: + case wx_mac_aml40: + *shift = WX_INCVAL_SHIFT_AML; + *incval = WX_INCVAL_AML; + return; + case wx_mac_em: *shift = WX_INCVAL_SHIFT_EM; *incval = WX_INCVAL_EM; return; + default: + break; } switch (wx->speed) { diff --git a/drivers/net/ethernet/wangxun/libwx/wx_sriov.c b/drivers/net/ethernet/wangxun/libwx/wx_sriov.c new file mode 100644 index 000000000000..e8656d9d733b --- /dev/null +++ b/drivers/net/ethernet/wangxun/libwx/wx_sriov.c @@ -0,0 +1,909 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */ + +#include <linux/etherdevice.h> +#include <linux/pci.h> + +#include "wx_type.h" +#include "wx_hw.h" +#include "wx_mbx.h" +#include "wx_sriov.h" + +static void wx_vf_configuration(struct pci_dev *pdev, int event_mask) +{ + bool enable = !!WX_VF_ENABLE_CHECK(event_mask); + struct wx *wx = pci_get_drvdata(pdev); + u32 vfn = WX_VF_NUM_GET(event_mask); + + if (enable) + eth_zero_addr(wx->vfinfo[vfn].vf_mac_addr); +} + +static int wx_alloc_vf_macvlans(struct wx *wx, u8 num_vfs) +{ + struct vf_macvlans *mv_list; + int num_vf_macvlans, i; + + /* Initialize list of VF macvlans */ + INIT_LIST_HEAD(&wx->vf_mvs.mvlist); + + num_vf_macvlans = wx->mac.num_rar_entries - + (WX_MAX_PF_MACVLANS + 1 + num_vfs); + if (!num_vf_macvlans) + return -EINVAL; + + mv_list = kcalloc(num_vf_macvlans, sizeof(struct vf_macvlans), + GFP_KERNEL); + if (!mv_list) + return -ENOMEM; + + for (i = 0; i < num_vf_macvlans; i++) { + mv_list[i].vf = -1; + mv_list[i].free = true; + list_add(&mv_list[i].mvlist, &wx->vf_mvs.mvlist); + } + wx->mv_list = mv_list; + + return 0; +} + +static void wx_sriov_clear_data(struct wx *wx) +{ + /* set num VFs to 0 to prevent access to vfinfo */ + wx->num_vfs = 0; + + /* free VF control structures */ + kfree(wx->vfinfo); + wx->vfinfo = NULL; + + /* free macvlan list */ + kfree(wx->mv_list); + wx->mv_list = NULL; + + /* set default pool back to 0 */ + wr32m(wx, WX_PSR_VM_CTL, WX_PSR_VM_CTL_POOL_MASK, 0); + wx->ring_feature[RING_F_VMDQ].offset = 0; + + clear_bit(WX_FLAG_SRIOV_ENABLED, wx->flags); + /* Disable VMDq flag so device will be set in NM mode */ + if (wx->ring_feature[RING_F_VMDQ].limit == 1) + clear_bit(WX_FLAG_VMDQ_ENABLED, wx->flags); +} + +static int __wx_enable_sriov(struct wx *wx, u8 num_vfs) +{ + int i, ret = 0; + u32 value = 0; + + set_bit(WX_FLAG_SRIOV_ENABLED, wx->flags); + dev_info(&wx->pdev->dev, "SR-IOV enabled with %d VFs\n", num_vfs); + + /* Enable VMDq flag so device will be set in VM mode */ + 
set_bit(WX_FLAG_VMDQ_ENABLED, wx->flags); + if (!wx->ring_feature[RING_F_VMDQ].limit) + wx->ring_feature[RING_F_VMDQ].limit = 1; + wx->ring_feature[RING_F_VMDQ].offset = num_vfs; + + wx->vfinfo = kcalloc(num_vfs, sizeof(struct vf_data_storage), + GFP_KERNEL); + if (!wx->vfinfo) + return -ENOMEM; + + ret = wx_alloc_vf_macvlans(wx, num_vfs); + if (ret) + return ret; + + /* Initialize default switching mode VEB */ + wr32m(wx, WX_PSR_CTL, WX_PSR_CTL_SW_EN, WX_PSR_CTL_SW_EN); + + for (i = 0; i < num_vfs; i++) { + /* enable spoof checking for all VFs */ + wx->vfinfo[i].spoofchk_enabled = true; + wx->vfinfo[i].link_enable = true; + /* untrust all VFs */ + wx->vfinfo[i].trusted = false; + /* set the default xcast mode */ + wx->vfinfo[i].xcast_mode = WXVF_XCAST_MODE_NONE; + } + + if (!test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) { + value = WX_CFG_PORT_CTL_NUM_VT_8; + } else { + if (num_vfs < 32) + value = WX_CFG_PORT_CTL_NUM_VT_32; + else + value = WX_CFG_PORT_CTL_NUM_VT_64; + } + wr32m(wx, WX_CFG_PORT_CTL, + WX_CFG_PORT_CTL_NUM_VT_MASK, + value); + + return ret; +} + +static void wx_sriov_reinit(struct wx *wx) +{ + rtnl_lock(); + wx->setup_tc(wx->netdev, netdev_get_num_tc(wx->netdev)); + rtnl_unlock(); +} + +void wx_disable_sriov(struct wx *wx) +{ + if (!pci_vfs_assigned(wx->pdev)) + pci_disable_sriov(wx->pdev); + else + wx_err(wx, "Unloading driver while VFs are assigned.\n"); + + /* clear flags and free allocated data */ + wx_sriov_clear_data(wx); +} +EXPORT_SYMBOL(wx_disable_sriov); + +static int wx_pci_sriov_enable(struct pci_dev *dev, + int num_vfs) +{ + struct wx *wx = pci_get_drvdata(dev); + int err = 0, i; + + err = __wx_enable_sriov(wx, num_vfs); + if (err) + return err; + + wx->num_vfs = num_vfs; + for (i = 0; i < wx->num_vfs; i++) + wx_vf_configuration(dev, (i | WX_VF_ENABLE)); + + /* reset before enabling SRIOV to avoid mailbox issues */ + wx_sriov_reinit(wx); + + err = pci_enable_sriov(dev, num_vfs); + if (err) { + wx_err(wx, "Failed to enable PCI sriov: %d\n", err); + goto err_out; + } + + return num_vfs; +err_out: + wx_sriov_clear_data(wx); + return err; +} + +static void wx_pci_sriov_disable(struct pci_dev *dev) +{ + struct wx *wx = pci_get_drvdata(dev); + + wx_disable_sriov(wx); + wx_sriov_reinit(wx); +} + +int wx_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) +{ + struct wx *wx = pci_get_drvdata(pdev); + int err; + + if (!num_vfs) { + if (!pci_vfs_assigned(pdev)) { + wx_pci_sriov_disable(pdev); + return 0; + } + + wx_err(wx, "can't free VFs because some are assigned to VMs.\n"); + return -EBUSY; + } + + err = wx_pci_sriov_enable(pdev, num_vfs); + if (err) + return err; + + return num_vfs; +} +EXPORT_SYMBOL(wx_pci_sriov_configure); + +static int wx_set_vf_mac(struct wx *wx, u16 vf, const u8 *mac_addr) +{ + u8 hw_addr[ETH_ALEN]; + int ret = 0; + + ether_addr_copy(hw_addr, mac_addr); + wx_del_mac_filter(wx, wx->vfinfo[vf].vf_mac_addr, vf); + ret = wx_add_mac_filter(wx, hw_addr, vf); + if (ret >= 0) + ether_addr_copy(wx->vfinfo[vf].vf_mac_addr, mac_addr); + else + eth_zero_addr(wx->vfinfo[vf].vf_mac_addr); + + return ret; +} + +static void wx_set_vmolr(struct wx *wx, u16 vf, bool aupe) +{ + u32 vmolr = rd32(wx, WX_PSR_VM_L2CTL(vf)); + + vmolr |= WX_PSR_VM_L2CTL_BAM; + if (aupe) + vmolr |= WX_PSR_VM_L2CTL_AUPE; + else + vmolr &= ~WX_PSR_VM_L2CTL_AUPE; + wr32(wx, WX_PSR_VM_L2CTL(vf), vmolr); +} + +static void wx_set_vmvir(struct wx *wx, u16 vid, u16 qos, u16 vf) +{ + u32 vmvir = vid | (qos << VLAN_PRIO_SHIFT) | + WX_TDM_VLAN_INS_VLANA_DEFAULT; + + wr32(wx, WX_TDM_VLAN_INS(vf), 
vmvir); +} + +static int wx_set_vf_vlan(struct wx *wx, int add, int vid, u16 vf) +{ + if (!vid && !add) + return 0; + + return wx_set_vfta(wx, vid, vf, (bool)add); +} + +static void wx_set_vlan_anti_spoofing(struct wx *wx, bool enable, int vf) +{ + u32 index = WX_VF_REG_OFFSET(vf), vf_bit = WX_VF_IND_SHIFT(vf); + u32 pfvfspoof; + + pfvfspoof = rd32(wx, WX_TDM_VLAN_AS(index)); + if (enable) + pfvfspoof |= BIT(vf_bit); + else + pfvfspoof &= ~BIT(vf_bit); + wr32(wx, WX_TDM_VLAN_AS(index), pfvfspoof); +} + +static void wx_write_qde(struct wx *wx, u32 vf, u32 qde) +{ + struct wx_ring_feature *vmdq = &wx->ring_feature[RING_F_VMDQ]; + u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask); + u32 reg = 0, n = vf * q_per_pool / 32; + u32 i = vf * q_per_pool; + + reg = rd32(wx, WX_RDM_PF_QDE(n)); + for (i = (vf * q_per_pool - n * 32); + i < ((vf + 1) * q_per_pool - n * 32); + i++) { + if (qde == 1) + reg |= qde << i; + else + reg &= qde << i; + } + + wr32(wx, WX_RDM_PF_QDE(n), reg); +} + +static void wx_clear_vmvir(struct wx *wx, u32 vf) +{ + wr32(wx, WX_TDM_VLAN_INS(vf), 0); +} + +static void wx_ping_vf(struct wx *wx, int vf) +{ + u32 ping = WX_PF_CONTROL_MSG; + + if (wx->vfinfo[vf].clear_to_send) + ping |= WX_VT_MSGTYPE_CTS; + wx_write_mbx_pf(wx, &ping, 1, vf); +} + +static void wx_set_vf_rx_tx(struct wx *wx, int vf) +{ + u32 index = WX_VF_REG_OFFSET(vf), vf_bit = WX_VF_IND_SHIFT(vf); + u32 reg_cur_tx, reg_cur_rx, reg_req_tx, reg_req_rx; + + reg_cur_tx = rd32(wx, WX_TDM_VF_TE(index)); + reg_cur_rx = rd32(wx, WX_RDM_VF_RE(index)); + + if (wx->vfinfo[vf].link_enable) { + reg_req_tx = reg_cur_tx | BIT(vf_bit); + reg_req_rx = reg_cur_rx | BIT(vf_bit); + /* Enable particular VF */ + if (reg_cur_tx != reg_req_tx) + wr32(wx, WX_TDM_VF_TE(index), reg_req_tx); + if (reg_cur_rx != reg_req_rx) + wr32(wx, WX_RDM_VF_RE(index), reg_req_rx); + } else { + reg_req_tx = BIT(vf_bit); + reg_req_rx = BIT(vf_bit); + /* Disable particular VF */ + if (reg_cur_tx & reg_req_tx) + wr32(wx, WX_TDM_VFTE_CLR(index), reg_req_tx); + if (reg_cur_rx & reg_req_rx) + wr32(wx, WX_RDM_VFRE_CLR(index), reg_req_rx); + } +} + +static int wx_get_vf_queues(struct wx *wx, u32 *msgbuf, u32 vf) +{ + struct wx_ring_feature *vmdq = &wx->ring_feature[RING_F_VMDQ]; + unsigned int default_tc = 0; + + msgbuf[WX_VF_TX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask); + msgbuf[WX_VF_RX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask); + + if (wx->vfinfo[vf].pf_vlan || wx->vfinfo[vf].pf_qos) + msgbuf[WX_VF_TRANS_VLAN] = 1; + else + msgbuf[WX_VF_TRANS_VLAN] = 0; + + /* notify VF of default queue */ + msgbuf[WX_VF_DEF_QUEUE] = default_tc; + + return 0; +} + +static void wx_vf_reset_event(struct wx *wx, u16 vf) +{ + struct vf_data_storage *vfinfo = &wx->vfinfo[vf]; + u8 num_tcs = netdev_get_num_tc(wx->netdev); + + /* add PF assigned VLAN */ + wx_set_vf_vlan(wx, true, vfinfo->pf_vlan, vf); + + /* reset offloads to defaults */ + wx_set_vmolr(wx, vf, !vfinfo->pf_vlan); + + /* set outgoing tags for VFs */ + if (!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) { + wx_clear_vmvir(wx, vf); + } else { + if (vfinfo->pf_qos || !num_tcs) + wx_set_vmvir(wx, vfinfo->pf_vlan, + vfinfo->pf_qos, vf); + else + wx_set_vmvir(wx, vfinfo->pf_vlan, + wx->default_up, vf); + } + + /* reset multicast table array for vf */ + wx->vfinfo[vf].num_vf_mc_hashes = 0; + + /* Flush and reset the mta with the new values */ + wx_set_rx_mode(wx->netdev); + + wx_del_mac_filter(wx, wx->vfinfo[vf].vf_mac_addr, vf); + /* reset VF api back to unknown */ + wx->vfinfo[vf].vf_api = wx_mbox_api_null; +} + +static void 
wx_vf_reset_msg(struct wx *wx, u16 vf) +{ + const u8 *vf_mac = wx->vfinfo[vf].vf_mac_addr; + struct net_device *dev = wx->netdev; + u32 msgbuf[5] = {0, 0, 0, 0, 0}; + u8 *addr = (u8 *)(&msgbuf[1]); + u32 reg = 0, index, vf_bit; + int pf_max_frame; + + /* reset the filters for the device */ + wx_vf_reset_event(wx, vf); + + /* set vf mac address */ + if (!is_zero_ether_addr(vf_mac)) + wx_set_vf_mac(wx, vf, vf_mac); + + index = WX_VF_REG_OFFSET(vf); + vf_bit = WX_VF_IND_SHIFT(vf); + + /* force drop enable for all VF Rx queues */ + wx_write_qde(wx, vf, 1); + + /* set transmit and receive for vf */ + wx_set_vf_rx_tx(wx, vf); + + pf_max_frame = dev->mtu + ETH_HLEN; + + if (pf_max_frame > ETH_FRAME_LEN) + reg = BIT(vf_bit); + wr32(wx, WX_RDM_VFRE_CLR(index), reg); + + /* enable VF mailbox for further messages */ + wx->vfinfo[vf].clear_to_send = true; + + /* reply to reset with ack and vf mac address */ + msgbuf[0] = WX_VF_RESET; + if (!is_zero_ether_addr(vf_mac)) { + msgbuf[0] |= WX_VT_MSGTYPE_ACK; + memcpy(addr, vf_mac, ETH_ALEN); + } else { + msgbuf[0] |= WX_VT_MSGTYPE_NACK; + wx_err(wx, "VF %d has no MAC address assigned", vf); + } + + msgbuf[3] = wx->mac.mc_filter_type; + wx_write_mbx_pf(wx, msgbuf, WX_VF_PERMADDR_MSG_LEN, vf); +} + +static int wx_set_vf_mac_addr(struct wx *wx, u32 *msgbuf, u16 vf) +{ + const u8 *new_mac = ((u8 *)(&msgbuf[1])); + int ret; + + if (!is_valid_ether_addr(new_mac)) { + wx_err(wx, "VF %d attempted to set invalid mac\n", vf); + return -EINVAL; + } + + if (wx->vfinfo[vf].pf_set_mac && + memcmp(wx->vfinfo[vf].vf_mac_addr, new_mac, ETH_ALEN)) { + wx_err(wx, + "VF %d attempt to set a MAC but it already had a MAC.", + vf); + return -EBUSY; + } + + ret = wx_set_vf_mac(wx, vf, new_mac); + if (ret < 0) + return ret; + + return 0; +} + +static void wx_set_vf_multicasts(struct wx *wx, u32 *msgbuf, u32 vf) +{ + struct vf_data_storage *vfinfo = &wx->vfinfo[vf]; + u16 entries = (msgbuf[0] & WX_VT_MSGINFO_MASK) + >> WX_VT_MSGINFO_SHIFT; + u32 vmolr = rd32(wx, WX_PSR_VM_L2CTL(vf)); + u32 vector_bit, vector_reg, mta_reg, i; + u16 *hash_list = (u16 *)&msgbuf[1]; + + /* only so many hash values supported */ + entries = min_t(u16, entries, WX_MAX_VF_MC_ENTRIES); + vfinfo->num_vf_mc_hashes = entries; + + for (i = 0; i < entries; i++) + vfinfo->vf_mc_hashes[i] = hash_list[i]; + + for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) { + vector_reg = WX_PSR_MC_TBL_REG(vfinfo->vf_mc_hashes[i]); + vector_bit = WX_PSR_MC_TBL_BIT(vfinfo->vf_mc_hashes[i]); + mta_reg = wx->mac.mta_shadow[vector_reg]; + mta_reg |= BIT(vector_bit); + wx->mac.mta_shadow[vector_reg] = mta_reg; + wr32(wx, WX_PSR_MC_TBL(vector_reg), mta_reg); + } + vmolr |= WX_PSR_VM_L2CTL_ROMPE; + wr32(wx, WX_PSR_VM_L2CTL(vf), vmolr); +} + +static void wx_set_vf_lpe(struct wx *wx, u32 max_frame, u32 vf) +{ + u32 index, vf_bit, vfre; + u32 max_frs, reg_val; + + /* determine VF receive enable location */ + index = WX_VF_REG_OFFSET(vf); + vf_bit = WX_VF_IND_SHIFT(vf); + + vfre = rd32(wx, WX_RDM_VF_RE(index)); + vfre |= BIT(vf_bit); + wr32(wx, WX_RDM_VF_RE(index), vfre); + + /* pull current max frame size from hardware */ + max_frs = DIV_ROUND_UP(max_frame, 1024); + reg_val = rd32(wx, WX_MAC_WDG_TIMEOUT) & WX_MAC_WDG_TIMEOUT_WTO_MASK; + if (max_frs > (reg_val + WX_MAC_WDG_TIMEOUT_WTO_DELTA)) + wr32(wx, WX_MAC_WDG_TIMEOUT, + max_frs - WX_MAC_WDG_TIMEOUT_WTO_DELTA); +} + +static int wx_find_vlvf_entry(struct wx *wx, u32 vlan) +{ + int regindex; + u32 vlvf; + + /* short cut the special case */ + if (vlan == 0) + return 0; + + /* Search for the 
vlan id in the VLVF entries */ + for (regindex = 1; regindex < WX_PSR_VLAN_SWC_ENTRIES; regindex++) { + wr32(wx, WX_PSR_VLAN_SWC_IDX, regindex); + vlvf = rd32(wx, WX_PSR_VLAN_SWC); + if ((vlvf & VLAN_VID_MASK) == vlan) + break; + } + + /* Return a negative value if not found */ + if (regindex >= WX_PSR_VLAN_SWC_ENTRIES) + regindex = -EINVAL; + + return regindex; +} + +static int wx_set_vf_macvlan(struct wx *wx, + u16 vf, int index, unsigned char *mac_addr) +{ + struct vf_macvlans *entry; + struct list_head *pos; + int retval = 0; + + if (index <= 1) { + list_for_each(pos, &wx->vf_mvs.mvlist) { + entry = list_entry(pos, struct vf_macvlans, mvlist); + if (entry->vf == vf) { + entry->vf = -1; + entry->free = true; + entry->is_macvlan = false; + wx_del_mac_filter(wx, entry->vf_macvlan, vf); + } + } + } + + if (!index) + return 0; + + entry = NULL; + list_for_each(pos, &wx->vf_mvs.mvlist) { + entry = list_entry(pos, struct vf_macvlans, mvlist); + if (entry->free) + break; + } + + if (!entry || !entry->free) + return -ENOSPC; + + retval = wx_add_mac_filter(wx, mac_addr, vf); + if (retval >= 0) { + entry->free = false; + entry->is_macvlan = true; + entry->vf = vf; + memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN); + } + + return retval; +} + +static int wx_set_vf_vlan_msg(struct wx *wx, u32 *msgbuf, u16 vf) +{ + int add = (msgbuf[0] & WX_VT_MSGINFO_MASK) >> WX_VT_MSGINFO_SHIFT; + int vid = (msgbuf[1] & WX_PSR_VLAN_SWC_VLANID_MASK); + int ret; + + if (add) + wx->vfinfo[vf].vlan_count++; + else if (wx->vfinfo[vf].vlan_count) + wx->vfinfo[vf].vlan_count--; + + /* in case of promiscuous mode any VLAN filter set for a VF must + * also have the PF pool added to it. + */ + if (add && wx->netdev->flags & IFF_PROMISC) + wx_set_vf_vlan(wx, add, vid, VMDQ_P(0)); + + ret = wx_set_vf_vlan(wx, add, vid, vf); + if (!ret && wx->vfinfo[vf].spoofchk_enabled) + wx_set_vlan_anti_spoofing(wx, true, vf); + + /* Go through all the checks to see if the VLAN filter should + * be wiped completely. + */ + if (!add && wx->netdev->flags & IFF_PROMISC) { + u32 bits = 0, vlvf; + int reg_ndx; + + reg_ndx = wx_find_vlvf_entry(wx, vid); + if (reg_ndx < 0) + return -ENOSPC; + wr32(wx, WX_PSR_VLAN_SWC_IDX, reg_ndx); + vlvf = rd32(wx, WX_PSR_VLAN_SWC); + /* See if any other pools are set for this VLAN filter + * entry other than the PF. + */ + if (VMDQ_P(0) < 32) { + bits = rd32(wx, WX_PSR_VLAN_SWC_VM_L); + bits &= ~BIT(VMDQ_P(0)); + if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) + bits |= rd32(wx, WX_PSR_VLAN_SWC_VM_H); + } else { + if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) + bits = rd32(wx, WX_PSR_VLAN_SWC_VM_H); + bits &= ~BIT(VMDQ_P(0) % 32); + bits |= rd32(wx, WX_PSR_VLAN_SWC_VM_L); + } + /* If the filter was removed then ensure PF pool bit + * is cleared if the PF only added itself to the pool + * because the PF is in promiscuous mode. 
+ */ + if ((vlvf & VLAN_VID_MASK) == vid && !bits) + wx_set_vf_vlan(wx, add, vid, VMDQ_P(0)); + } + + return 0; +} + +static int wx_set_vf_macvlan_msg(struct wx *wx, u32 *msgbuf, u16 vf) +{ + int index = (msgbuf[0] & WX_VT_MSGINFO_MASK) >> + WX_VT_MSGINFO_SHIFT; + u8 *new_mac = ((u8 *)(&msgbuf[1])); + int err; + + if (wx->vfinfo[vf].pf_set_mac && index > 0) { + wx_err(wx, "VF %d requested a MACVLAN filter but is denied\n", vf); + return -EINVAL; + } + + /* A non-zero index indicates the VF is setting a filter */ + if (index) { + if (!is_valid_ether_addr(new_mac)) { + wx_err(wx, "VF %d attempted to set invalid mac\n", vf); + return -EINVAL; + } + /* If the VF is allowed to set MAC filters then turn off + * anti-spoofing to avoid false positives. + */ + if (wx->vfinfo[vf].spoofchk_enabled) + wx_set_vf_spoofchk(wx->netdev, vf, false); + } + + err = wx_set_vf_macvlan(wx, vf, index, new_mac); + if (err == -ENOSPC) + wx_err(wx, + "VF %d requested a MACVLAN filter but there is no space\n", + vf); + if (err < 0) + return err; + + return 0; +} + +static int wx_negotiate_vf_api(struct wx *wx, u32 *msgbuf, u32 vf) +{ + int api = msgbuf[1]; + + switch (api) { + case wx_mbox_api_13: + wx->vfinfo[vf].vf_api = api; + return 0; + default: + wx_err(wx, "VF %d requested invalid api version %u\n", vf, api); + return -EINVAL; + } +} + +static int wx_get_vf_link_state(struct wx *wx, u32 *msgbuf, u32 vf) +{ + msgbuf[1] = wx->vfinfo[vf].link_enable; + + return 0; +} + +static int wx_get_fw_version(struct wx *wx, u32 *msgbuf, u32 vf) +{ + unsigned long fw_version = 0ULL; + int ret = 0; + + ret = kstrtoul(wx->eeprom_id, 16, &fw_version); + if (ret) + return -EOPNOTSUPP; + msgbuf[1] = fw_version; + + return 0; +} + +static int wx_update_vf_xcast_mode(struct wx *wx, u32 *msgbuf, u32 vf) +{ + int xcast_mode = msgbuf[1]; + u32 vmolr, disable, enable; + + if (wx->vfinfo[vf].xcast_mode == xcast_mode) + return 0; + + switch (xcast_mode) { + case WXVF_XCAST_MODE_NONE: + disable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE | + WX_PSR_VM_L2CTL_MPE | WX_PSR_VM_L2CTL_UPE | + WX_PSR_VM_L2CTL_VPE; + enable = 0; + break; + case WXVF_XCAST_MODE_MULTI: + disable = WX_PSR_VM_L2CTL_MPE | WX_PSR_VM_L2CTL_UPE | + WX_PSR_VM_L2CTL_VPE; + enable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE; + break; + case WXVF_XCAST_MODE_ALLMULTI: + disable = WX_PSR_VM_L2CTL_UPE | WX_PSR_VM_L2CTL_VPE; + enable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE | + WX_PSR_VM_L2CTL_MPE; + break; + case WXVF_XCAST_MODE_PROMISC: + disable = 0; + enable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE | + WX_PSR_VM_L2CTL_MPE | WX_PSR_VM_L2CTL_UPE | + WX_PSR_VM_L2CTL_VPE; + break; + default: + return -EOPNOTSUPP; + } + + vmolr = rd32(wx, WX_PSR_VM_L2CTL(vf)); + vmolr &= ~disable; + vmolr |= enable; + wr32(wx, WX_PSR_VM_L2CTL(vf), vmolr); + + wx->vfinfo[vf].xcast_mode = xcast_mode; + msgbuf[1] = xcast_mode; + + return 0; +} + +static void wx_rcv_msg_from_vf(struct wx *wx, u16 vf) +{ + u16 mbx_size = WX_VXMAILBOX_SIZE; + u32 msgbuf[WX_VXMAILBOX_SIZE]; + int retval; + + retval = wx_read_mbx_pf(wx, msgbuf, mbx_size, vf); + if (retval) { + wx_err(wx, "Error receiving message from VF\n"); + return; + } + + /* this is a message we already processed, do nothing */ + if (msgbuf[0] & (WX_VT_MSGTYPE_ACK | WX_VT_MSGTYPE_NACK)) + return; + + if (msgbuf[0] == WX_VF_RESET) { + wx_vf_reset_msg(wx, vf); + return; + } + + /* until the vf completes a virtual function reset it should not be + * allowed to start any configuration. 
+ */ + if (!wx->vfinfo[vf].clear_to_send) { + msgbuf[0] |= WX_VT_MSGTYPE_NACK; + wx_write_mbx_pf(wx, msgbuf, 1, vf); + return; + } + + switch ((msgbuf[0] & U16_MAX)) { + case WX_VF_SET_MAC_ADDR: + retval = wx_set_vf_mac_addr(wx, msgbuf, vf); + break; + case WX_VF_SET_MULTICAST: + wx_set_vf_multicasts(wx, msgbuf, vf); + retval = 0; + break; + case WX_VF_SET_VLAN: + retval = wx_set_vf_vlan_msg(wx, msgbuf, vf); + break; + case WX_VF_SET_LPE: + wx_set_vf_lpe(wx, msgbuf[1], vf); + retval = 0; + break; + case WX_VF_SET_MACVLAN: + retval = wx_set_vf_macvlan_msg(wx, msgbuf, vf); + break; + case WX_VF_API_NEGOTIATE: + retval = wx_negotiate_vf_api(wx, msgbuf, vf); + break; + case WX_VF_GET_QUEUES: + retval = wx_get_vf_queues(wx, msgbuf, vf); + break; + case WX_VF_GET_LINK_STATE: + retval = wx_get_vf_link_state(wx, msgbuf, vf); + break; + case WX_VF_GET_FW_VERSION: + retval = wx_get_fw_version(wx, msgbuf, vf); + break; + case WX_VF_UPDATE_XCAST_MODE: + retval = wx_update_vf_xcast_mode(wx, msgbuf, vf); + break; + case WX_VF_BACKUP: + break; + default: + wx_err(wx, "Unhandled Msg %8.8x\n", msgbuf[0]); + break; + } + + /* notify the VF of the results of what it sent us */ + if (retval) + msgbuf[0] |= WX_VT_MSGTYPE_NACK; + else + msgbuf[0] |= WX_VT_MSGTYPE_ACK; + + msgbuf[0] |= WX_VT_MSGTYPE_CTS; + + wx_write_mbx_pf(wx, msgbuf, mbx_size, vf); +} + +static void wx_rcv_ack_from_vf(struct wx *wx, u16 vf) +{ + u32 msg = WX_VT_MSGTYPE_NACK; + + /* if device isn't clear to send it shouldn't be reading either */ + if (!wx->vfinfo[vf].clear_to_send) + wx_write_mbx_pf(wx, &msg, 1, vf); +} + +void wx_msg_task(struct wx *wx) +{ + u16 vf; + + for (vf = 0; vf < wx->num_vfs; vf++) { + /* process any reset requests */ + if (!wx_check_for_rst_pf(wx, vf)) + wx_vf_reset_event(wx, vf); + + /* process any messages pending */ + if (!wx_check_for_msg_pf(wx, vf)) + wx_rcv_msg_from_vf(wx, vf); + + /* process any acks */ + if (!wx_check_for_ack_pf(wx, vf)) + wx_rcv_ack_from_vf(wx, vf); + } +} +EXPORT_SYMBOL(wx_msg_task); + +void wx_disable_vf_rx_tx(struct wx *wx) +{ + wr32(wx, WX_TDM_VFTE_CLR(0), U32_MAX); + wr32(wx, WX_RDM_VFRE_CLR(0), U32_MAX); + if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) { + wr32(wx, WX_TDM_VFTE_CLR(1), U32_MAX); + wr32(wx, WX_RDM_VFRE_CLR(1), U32_MAX); + } +} +EXPORT_SYMBOL(wx_disable_vf_rx_tx); + +void wx_ping_all_vfs_with_link_status(struct wx *wx, bool link_up) +{ + u32 msgbuf[2] = {0, 0}; + u16 i; + + if (!wx->num_vfs) + return; + msgbuf[0] = WX_PF_NOFITY_VF_LINK_STATUS | WX_PF_CONTROL_MSG; + if (link_up) + msgbuf[1] = FIELD_PREP(GENMASK(31, 1), wx->speed) | link_up; + if (wx->notify_down) + msgbuf[1] |= WX_PF_NOFITY_VF_NET_NOT_RUNNING; + for (i = 0; i < wx->num_vfs; i++) { + if (wx->vfinfo[i].clear_to_send) + msgbuf[0] |= WX_VT_MSGTYPE_CTS; + wx_write_mbx_pf(wx, msgbuf, 2, i); + } +} +EXPORT_SYMBOL(wx_ping_all_vfs_with_link_status); + +static void wx_set_vf_link_state(struct wx *wx, int vf, int state) +{ + wx->vfinfo[vf].link_state = state; + switch (state) { + case IFLA_VF_LINK_STATE_AUTO: + if (netif_running(wx->netdev)) + wx->vfinfo[vf].link_enable = true; + else + wx->vfinfo[vf].link_enable = false; + break; + case IFLA_VF_LINK_STATE_ENABLE: + wx->vfinfo[vf].link_enable = true; + break; + case IFLA_VF_LINK_STATE_DISABLE: + wx->vfinfo[vf].link_enable = false; + break; + } + /* restart the VF */ + wx->vfinfo[vf].clear_to_send = false; + wx_ping_vf(wx, vf); + + wx_set_vf_rx_tx(wx, vf); +} + +void wx_set_all_vfs(struct wx *wx) +{ + int i; + + for (i = 0; i < wx->num_vfs; i++) + 
wx_set_vf_link_state(wx, i, wx->vfinfo[i].link_state); +} +EXPORT_SYMBOL(wx_set_all_vfs); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_sriov.h b/drivers/net/ethernet/wangxun/libwx/wx_sriov.h new file mode 100644 index 000000000000..8a3a47bb5815 --- /dev/null +++ b/drivers/net/ethernet/wangxun/libwx/wx_sriov.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */ + +#ifndef _WX_SRIOV_H_ +#define _WX_SRIOV_H_ + +#define WX_VF_ENABLE_CHECK(_m) FIELD_GET(BIT(31), (_m)) +#define WX_VF_NUM_GET(_m) FIELD_GET(GENMASK(5, 0), (_m)) +#define WX_VF_ENABLE BIT(31) + +void wx_disable_sriov(struct wx *wx); +int wx_pci_sriov_configure(struct pci_dev *pdev, int num_vfs); +void wx_msg_task(struct wx *wx); +void wx_disable_vf_rx_tx(struct wx *wx); +void wx_ping_all_vfs_with_link_status(struct wx *wx, bool link_up); +void wx_set_all_vfs(struct wx *wx); + +#endif /* _WX_SRIOV_H_ */ diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h index 4c545b2aa997..7730c9fc3e02 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_type.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h @@ -20,8 +20,13 @@ /* MSI-X capability fields masks */ #define WX_PCIE_MSIX_TBL_SZ_MASK 0x7FF #define WX_PCI_LINK_STATUS 0xB2 +#define WX_MAX_PF_MACVLANS 15 +#define WX_MAX_VF_MC_ENTRIES 30 /**************** Global Registers ****************************/ +#define WX_VF_REG_OFFSET(_v) FIELD_GET(GENMASK(15, 5), (_v)) +#define WX_VF_IND_SHIFT(_v) FIELD_GET(GENMASK(4, 0), (_v)) + /* chip control Registers */ #define WX_MIS_PWR 0x10000 #define WX_MIS_RST 0x1000C @@ -76,6 +81,9 @@ #define WX_MAC_LXONOFFRXC 0x11E0C /*********************** Receive DMA registers **************************/ +#define WX_RDM_VF_RE(_i) (0x12004 + ((_i) * 4)) +#define WX_RDM_PF_QDE(_i) (0x12080 + ((_i) * 4)) +#define WX_RDM_VFRE_CLR(_i) (0x120A0 + ((_i) * 4)) #define WX_RDM_DRP_PKT 0x12500 #define WX_RDM_PKT_CNT 0x12504 #define WX_RDM_BYTE_CNT_LSB 0x12508 @@ -84,12 +92,17 @@ /************************* Port Registers ************************************/ /* port cfg Registers */ #define WX_CFG_PORT_CTL 0x14400 +#define WX_CFG_PORT_CTL_PFRSTD BIT(14) #define WX_CFG_PORT_CTL_DRV_LOAD BIT(3) #define WX_CFG_PORT_CTL_QINQ BIT(2) #define WX_CFG_PORT_CTL_D_VLAN BIT(0) /* double vlan*/ #define WX_CFG_TAG_TPID(_i) (0x14430 + ((_i) * 4)) #define WX_CFG_PORT_CTL_NUM_VT_MASK GENMASK(13, 12) /* number of TVs */ +#define WX_CFG_PORT_CTL_NUM_VT_NONE 0 +#define WX_CFG_PORT_CTL_NUM_VT_8 FIELD_PREP(GENMASK(13, 12), 1) +#define WX_CFG_PORT_CTL_NUM_VT_32 FIELD_PREP(GENMASK(13, 12), 2) +#define WX_CFG_PORT_CTL_NUM_VT_64 FIELD_PREP(GENMASK(13, 12), 3) /* GPIO Registers */ #define WX_GPIO_DR 0x14800 @@ -112,6 +125,11 @@ /*********************** Transmit DMA registers **************************/ /* transmit global control */ #define WX_TDM_CTL 0x18000 +#define WX_TDM_VF_TE(_i) (0x18004 + ((_i) * 4)) +#define WX_TDM_MAC_AS(_i) (0x18060 + ((_i) * 4)) +#define WX_TDM_VLAN_AS(_i) (0x18070 + ((_i) * 4)) +#define WX_TDM_VFTE_CLR(_i) (0x180A0 + ((_i) * 4)) + /* TDM CTL BIT */ #define WX_TDM_CTL_TE BIT(0) /* Transmit Enable */ #define WX_TDM_PB_THRE(_i) (0x18020 + ((_i) * 4)) @@ -165,6 +183,7 @@ /******************************* PSR Registers *******************************/ /* psr control */ #define WX_PSR_CTL 0x15000 +#define WX_PSR_VM_CTL 0x151B0 /* Header split receive */ #define WX_PSR_CTL_SW_EN BIT(18) #define WX_PSR_CTL_RSC_ACK BIT(17) @@ -201,12 +220,17 @@ #define 
WX_PSR_1588_CTL_VALID BIT(0) /* mcasst/ucast overflow tbl */ #define WX_PSR_MC_TBL(_i) (0x15200 + ((_i) * 4)) +#define WX_PSR_MC_TBL_REG(_i) FIELD_GET(GENMASK(11, 5), (_i)) +#define WX_PSR_MC_TBL_BIT(_i) FIELD_GET(GENMASK(4, 0), (_i)) #define WX_PSR_UC_TBL(_i) (0x15400 + ((_i) * 4)) +#define WX_PSR_VM_CTL_REPLEN BIT(30) /* replication enabled */ +#define WX_PSR_VM_CTL_POOL_MASK GENMASK(12, 7) /* VM L2 contorl */ #define WX_PSR_VM_L2CTL(_i) (0x15600 + ((_i) * 4)) #define WX_PSR_VM_L2CTL_UPE BIT(4) /* unicast promiscuous */ #define WX_PSR_VM_L2CTL_VACC BIT(6) /* accept nomatched vlan */ +#define WX_PSR_VM_L2CTL_VPE BIT(7) /* vlan promiscuous mode */ #define WX_PSR_VM_L2CTL_AUPE BIT(8) /* accept untagged packets */ #define WX_PSR_VM_L2CTL_ROMPE BIT(9) /* accept packets in MTA tbl */ #define WX_PSR_VM_L2CTL_ROPE BIT(10) /* accept packets in UC tbl */ @@ -245,10 +269,12 @@ #define WX_PSR_VLAN_SWC 0x16220 #define WX_PSR_VLAN_SWC_VM_L 0x16224 #define WX_PSR_VLAN_SWC_VM_H 0x16228 +#define WX_PSR_VLAN_SWC_VM(_i) (0x16224 + ((_i) * 4)) #define WX_PSR_VLAN_SWC_IDX 0x16230 /* 64 vlan entries */ /* VLAN pool filtering masks */ #define WX_PSR_VLAN_SWC_VIEN BIT(31) /* filter is valid */ #define WX_PSR_VLAN_SWC_ENTRIES 64 +#define WX_PSR_VLAN_SWC_VLANID_MASK GENMASK(11, 0) /********************************* RSEC **************************************/ /* general rsec */ @@ -259,6 +285,13 @@ #define WX_RSC_ST 0x17004 #define WX_RSC_ST_RSEC_RDY BIT(0) +/*********************** Transmit DMA registers **************************/ +/* transmit global control */ +#define WX_TDM_ETYPE_AS(_i) (0x18058 + ((_i) * 4)) +#define WX_TDM_VLAN_INS(_i) (0x18100 + ((_i) * 4)) +/* Per VF Port VLAN insertion rules */ +#define WX_TDM_VLAN_INS_VLANA_DEFAULT BIT(30) /* Always use default VLAN*/ + /****************************** TDB ******************************************/ #define WX_TDB_PB_SZ(_i) (0x1CC00 + ((_i) * 4)) #define WX_TXPKT_SIZE_MAX 0xA /* Max Tx Packet size */ @@ -328,6 +361,9 @@ #define WX_MAC_WDG_TIMEOUT 0x1100C #define WX_MAC_RX_FLOW_CTRL 0x11090 #define WX_MAC_RX_FLOW_CTRL_RFE BIT(0) /* receive fc enable */ + +#define WX_MAC_WDG_TIMEOUT_WTO_MASK GENMASK(3, 0) +#define WX_MAC_WDG_TIMEOUT_WTO_DELTA 2 /* MDIO Registers */ #define WX_MSCA 0x11200 #define WX_MSCA_RA(v) FIELD_PREP(U16_MAX, v) @@ -417,6 +453,15 @@ enum WX_MSCA_CMD_value { /* Number of 80 microseconds we wait for PCI Express master disable */ #define WX_PCI_MASTER_DISABLE_TIMEOUT 80000 +#define WX_RSS_64Q_MASK 0x3F +#define WX_RSS_8Q_MASK 0x7 +#define WX_RSS_4Q_MASK 0x3 +#define WX_RSS_2Q_MASK 0x1 +#define WX_RSS_DISABLED_MASK 0x0 + +#define WX_VMDQ_4Q_MASK 0x7C +#define WX_VMDQ_2Q_MASK 0x7E + /****************** Manageablility Host Interface defines ********************/ #define WX_HI_MAX_BLOCK_BYTE_LENGTH 256 /* Num of bytes in range */ #define WX_HI_COMMAND_TIMEOUT 1000 /* Process HI command limit */ @@ -484,7 +529,7 @@ enum WX_MSCA_CMD_value { #define WX_REQ_TX_DESCRIPTOR_MULTIPLE 128 #define WX_MAX_JUMBO_FRAME_SIZE 9432 /* max payload 9414 */ -#define VMDQ_P(p) p +#define VMDQ_P(p) ((p) + wx->ring_feature[RING_F_VMDQ].offset) /* Supported Rx Buffer Sizes */ #define WX_RXBUFFER_256 256 /* Used for skb receive header */ @@ -778,6 +823,10 @@ struct wx_bus_info { u16 device; }; +struct wx_mbx_info { + u16 size; +}; + struct wx_thermal_sensor_data { s16 temp; s16 alarm_thresh; @@ -789,13 +838,14 @@ enum wx_mac_type { wx_mac_sp, wx_mac_em, wx_mac_aml, + wx_mac_aml40, }; -enum sp_media_type { - sp_media_unknown = 0, - sp_media_fiber, - sp_media_copper, 
- sp_media_backplane +enum wx_media_type { + wx_media_unknown = 0, + wx_media_fiber, + wx_media_copper, + wx_media_backplane }; enum em_mac_type { @@ -1051,6 +1101,7 @@ struct wx_ring_feature { enum wx_ring_f_enum { RING_F_NONE = 0, + RING_F_VMDQ, RING_F_RSS, RING_F_FDIR, RING_F_ARRAY_SIZE /* must be last in enum set */ @@ -1103,11 +1154,43 @@ enum wx_state { WX_STATE_SWFW_BUSY, WX_STATE_PTP_RUNNING, WX_STATE_PTP_TX_IN_PROGRESS, + WX_STATE_SERVICE_SCHED, WX_STATE_NBITS /* must be last */ }; +struct vf_data_storage { + struct pci_dev *vfdev; + unsigned char vf_mac_addr[ETH_ALEN]; + bool spoofchk_enabled; + bool link_enable; + bool trusted; + int xcast_mode; + unsigned int vf_api; + bool clear_to_send; + u16 pf_vlan; /* When set, guest VLAN config not allowed. */ + u16 pf_qos; + bool pf_set_mac; + + u16 vf_mc_hashes[WX_MAX_VF_MC_ENTRIES]; + u16 num_vf_mc_hashes; + u16 vlan_count; + int link_state; +}; + +struct vf_macvlans { + struct list_head mvlist; + int vf; + bool free; + bool is_macvlan; + u8 vf_macvlan[ETH_ALEN]; +}; + enum wx_pf_flags { + WX_FLAG_MULTI_64_FUNC, WX_FLAG_SWFW_RING, + WX_FLAG_VMDQ_ENABLED, + WX_FLAG_VLAN_PROMISC, + WX_FLAG_SRIOV_ENABLED, WX_FLAG_FDIR_CAPABLE, WX_FLAG_FDIR_HASH, WX_FLAG_FDIR_PERFECT, @@ -1115,6 +1198,8 @@ enum wx_pf_flags { WX_FLAG_RX_HWTSTAMP_ENABLED, WX_FLAG_RX_HWTSTAMP_IN_REGISTER, WX_FLAG_PTP_PPS_ENABLED, + WX_FLAG_NEED_LINK_CONFIG, + WX_FLAG_NEED_SFP_RESET, WX_PF_FLAGS_NBITS /* must be last */ }; @@ -1128,9 +1213,10 @@ struct wx { struct pci_dev *pdev; struct net_device *netdev; struct wx_bus_info bus; + struct wx_mbx_info mbx; struct wx_mac_info mac; enum em_mac_type mac_type; - enum sp_media_type media_type; + enum wx_media_type media_type; struct wx_eeprom_info eeprom; struct wx_addr_filter_info addr_ctrl; struct wx_fc_info fc; @@ -1151,6 +1237,9 @@ struct wx { u8 swfw_index; /* PHY stuff */ + bool notify_down; + int adv_speed; + int adv_duplex; unsigned int link; int speed; int duplex; @@ -1182,6 +1271,8 @@ struct wx { struct wx_ring *tx_ring[64] ____cacheline_aligned_in_smp; struct wx_ring *rx_ring[64]; struct wx_q_vector *q_vector[64]; + int num_rx_pools; + int num_rx_queues_per_pool; unsigned int queues_per_pool; struct msix_entry *msix_q_entries; @@ -1203,6 +1294,7 @@ struct wx { u32 wol; u16 bd_number; + bool default_up; struct wx_hw_stats stats; u64 tx_busy; @@ -1211,10 +1303,16 @@ struct wx { u64 hw_csum_rx_good; u64 hw_csum_rx_error; u64 alloc_rx_buff_failed; + unsigned int num_vfs; + struct vf_data_storage *vfinfo; + struct vf_macvlans vf_mvs; + struct vf_macvlans *mv_list; + unsigned long fwd_bitmask; u32 atr_sample_rate; void (*atr)(struct wx_ring *ring, struct wx_tx_buffer *first, u8 ptype); void (*configure_fdir)(struct wx *wx); + int (*setup_tc)(struct net_device *netdev, u8 tc); void (*do_reset)(struct net_device *netdev); int (*ptp_setup_sdp)(struct wx *wx); @@ -1239,6 +1337,9 @@ struct wx { struct ptp_clock_info ptp_caps; struct kernel_hwtstamp_config tstamp_config; struct sk_buff *ptp_tx_skb; + + struct timer_list service_timer; + struct work_struct service_task; }; #define WX_INTR_ALL (~0ULL) diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c index 91b3055a5a9f..b5022c49dc5e 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c @@ -15,6 +15,8 @@ #include "../libwx/wx_hw.h" #include "../libwx/wx_lib.h" #include "../libwx/wx_ptp.h" +#include "../libwx/wx_mbx.h" +#include "../libwx/wx_sriov.h" #include "ngbe_type.h" 
#include "ngbe_mdio.h" #include "ngbe_hw.h" @@ -129,6 +131,10 @@ static int ngbe_sw_init(struct wx *wx) wx->tx_work_limit = NGBE_DEFAULT_TX_WORK; wx->rx_work_limit = NGBE_DEFAULT_RX_WORK; + wx->mbx.size = WX_VXMAILBOX_SIZE; + wx->setup_tc = ngbe_setup_tc; + set_bit(0, &wx->fwd_bitmask); + return 0; } @@ -200,12 +206,10 @@ static irqreturn_t ngbe_intr(int __always_unused irq, void *data) return IRQ_HANDLED; } -static irqreturn_t ngbe_msix_other(int __always_unused irq, void *data) +static irqreturn_t __ngbe_msix_misc(struct wx *wx, u32 eicr) { - struct wx *wx = data; - u32 eicr; - - eicr = wx_misc_isb(wx, WX_ISB_MISC); + if (eicr & NGBE_PX_MISC_IC_VF_MBOX) + wx_msg_task(wx); if (unlikely(eicr & NGBE_PX_MISC_IC_TIMESYNC)) wx_ptp_check_pps_event(wx); @@ -217,6 +221,35 @@ static irqreturn_t ngbe_msix_other(int __always_unused irq, void *data) return IRQ_HANDLED; } +static irqreturn_t ngbe_msix_misc(int __always_unused irq, void *data) +{ + struct wx *wx = data; + u32 eicr; + + eicr = wx_misc_isb(wx, WX_ISB_MISC); + + return __ngbe_msix_misc(wx, eicr); +} + +static irqreturn_t ngbe_misc_and_queue(int __always_unused irq, void *data) +{ + struct wx_q_vector *q_vector; + struct wx *wx = data; + u32 eicr; + + eicr = wx_misc_isb(wx, WX_ISB_MISC); + if (!eicr) { + /* queue */ + q_vector = wx->q_vector[0]; + napi_schedule_irqoff(&q_vector->napi); + if (netif_running(wx->netdev)) + ngbe_irq_enable(wx, true); + return IRQ_HANDLED; + } + + return __ngbe_msix_misc(wx, eicr); +} + /** * ngbe_request_msix_irqs - Initialize MSI-X interrupts * @wx: board private structure @@ -249,8 +282,16 @@ static int ngbe_request_msix_irqs(struct wx *wx) } } - err = request_irq(wx->msix_entry->vector, - ngbe_msix_other, 0, netdev->name, wx); + /* Due to hardware design, when num_vfs < 7, pf can use 0 for misc and 1 + * for queue. But when num_vfs == 7, vector[1] is assigned to vf6. + * Misc and queue should reuse interrupt vector[0]. 
+ */ + if (wx->num_vfs == 7) + err = request_irq(wx->msix_entry->vector, + ngbe_misc_and_queue, 0, netdev->name, wx); + else + err = request_irq(wx->msix_entry->vector, + ngbe_msix_misc, 0, netdev->name, wx); if (err) { wx_err(wx, "request_irq for msix_other failed: %d\n", err); @@ -302,6 +343,22 @@ static void ngbe_disable_device(struct wx *wx) struct net_device *netdev = wx->netdev; u32 i; + if (wx->num_vfs) { + /* Clear EITR Select mapping */ + wr32(wx, WX_PX_ITRSEL, 0); + + /* Mark all the VFs as inactive */ + for (i = 0; i < wx->num_vfs; i++) + wx->vfinfo[i].clear_to_send = 0; + wx->notify_down = true; + /* ping all the active vfs to let them know we are going down */ + wx_ping_all_vfs_with_link_status(wx, false); + wx->notify_down = false; + + /* Disable all VFTE/VFRE TX/RX */ + wx_disable_vf_rx_tx(wx); + } + /* disable all enabled rx queues */ for (i = 0; i < wx->num_rx_queues; i++) /* this call also flushes the previous write */ @@ -324,12 +381,19 @@ static void ngbe_disable_device(struct wx *wx) wx_update_stats(wx); } +static void ngbe_reset(struct wx *wx) +{ + wx_flush_sw_mac_table(wx); + wx_mac_set_default_filter(wx, wx->mac.addr); + if (test_bit(WX_STATE_PTP_RUNNING, wx->state)) + wx_ptp_reset(wx); +} + void ngbe_down(struct wx *wx) { phylink_stop(wx->phylink); ngbe_disable_device(wx); - if (test_bit(WX_STATE_PTP_RUNNING, wx->state)) - wx_ptp_reset(wx); + ngbe_reset(wx); wx_clean_all_tx_rings(wx); wx_clean_all_rx_rings(wx); } @@ -352,6 +416,11 @@ void ngbe_up(struct wx *wx) ngbe_sfp_modules_txrx_powerctl(wx, true); phylink_start(wx->phylink); + /* Set PF Reset Done bit so PF/VF Mail Ops can work */ + wr32m(wx, WX_CFG_PORT_CTL, + WX_CFG_PORT_CTL_PFRSTD, WX_CFG_PORT_CTL_PFRSTD); + if (wx->num_vfs) + wx_ping_all_vfs_with_link_status(wx, false); } /** @@ -518,6 +587,7 @@ static const struct net_device_ops ngbe_netdev_ops = { .ndo_set_rx_mode = wx_set_rx_mode, .ndo_set_features = wx_set_features, .ndo_fix_features = wx_fix_features, + .ndo_features_check = wx_features_check, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = wx_set_mac, .ndo_get_stats64 = wx_get_stats64, @@ -596,6 +666,10 @@ static int ngbe_probe(struct pci_dev *pdev, goto err_pci_release_regions; } + /* The emerald supports up to 8 VFs per pf, but physical + * function also need one pool for basic networking. 
+ */ + pci_sriov_set_totalvfs(pdev, NGBE_MAX_VFS_DRV_LIMIT); wx->driver_name = ngbe_driver_name; ngbe_set_ethtool_ops(netdev); netdev->netdev_ops = &ngbe_netdev_ops; @@ -744,6 +818,7 @@ static void ngbe_remove(struct pci_dev *pdev) struct net_device *netdev; netdev = wx->netdev; + wx_disable_sriov(wx); unregister_netdev(netdev); phylink_destroy(wx->phylink); pci_release_selected_regions(pdev, @@ -803,6 +878,7 @@ static struct pci_driver ngbe_driver = { .suspend = ngbe_suspend, .resume = ngbe_resume, .shutdown = ngbe_shutdown, + .sriov_configure = wx_pci_sriov_configure, }; module_pci_driver(ngbe_driver); diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c index ea1d7e9a91f3..c63bb6e6f405 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c @@ -9,6 +9,7 @@ #include "../libwx/wx_type.h" #include "../libwx/wx_ptp.h" #include "../libwx/wx_hw.h" +#include "../libwx/wx_sriov.h" #include "ngbe_type.h" #include "ngbe_mdio.h" @@ -70,6 +71,8 @@ static void ngbe_mac_link_down(struct phylink_config *config, wx->speed = SPEED_UNKNOWN; if (test_bit(WX_STATE_PTP_RUNNING, wx->state)) wx_ptp_reset_cyclecounter(wx); + /* ping all the active vfs to let them know we are going down */ + wx_ping_all_vfs_with_link_status(wx, false); } static void ngbe_mac_link_up(struct phylink_config *config, @@ -114,6 +117,8 @@ static void ngbe_mac_link_up(struct phylink_config *config, wx->last_rx_ptp_check = jiffies; if (test_bit(WX_STATE_PTP_RUNNING, wx->state)) wx_ptp_reset_cyclecounter(wx); + /* ping all the active vfs to let them know we are going up */ + wx_ping_all_vfs_with_link_status(wx, true); } static const struct phylink_mac_ops ngbe_mac_ops = { diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h index 992adbb98c7d..bb74263f0498 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h @@ -73,12 +73,14 @@ #define NGBE_PX_MISC_IEN_TIMESYNC BIT(11) #define NGBE_PX_MISC_IEN_ETH_LK BIT(18) #define NGBE_PX_MISC_IEN_INT_ERR BIT(20) +#define NGBE_PX_MISC_IC_VF_MBOX BIT(23) #define NGBE_PX_MISC_IEN_GPIO BIT(26) #define NGBE_PX_MISC_IEN_MASK ( \ NGBE_PX_MISC_IEN_DEV_RST | \ NGBE_PX_MISC_IEN_TIMESYNC | \ NGBE_PX_MISC_IEN_ETH_LK | \ NGBE_PX_MISC_IEN_INT_ERR | \ + NGBE_PX_MISC_IC_VF_MBOX | \ NGBE_PX_MISC_IEN_GPIO) /* Extended Interrupt Cause Read */ @@ -134,6 +136,7 @@ #define NGBE_MAX_RXD 8192 #define NGBE_MIN_RXD 128 +#define NGBE_MAX_VFS_DRV_LIMIT 7 extern char ngbe_driver_name[]; void ngbe_down(struct wx *wx); diff --git a/drivers/net/ethernet/wangxun/txgbe/Makefile b/drivers/net/ethernet/wangxun/txgbe/Makefile index f74576fe7062..c757fa95e58e 100644 --- a/drivers/net/ethernet/wangxun/txgbe/Makefile +++ b/drivers/net/ethernet/wangxun/txgbe/Makefile @@ -11,4 +11,5 @@ txgbe-objs := txgbe_main.o \ txgbe_phy.o \ txgbe_irq.o \ txgbe_fdir.o \ - txgbe_ethtool.o + txgbe_ethtool.o \ + txgbe_aml.o diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c new file mode 100644 index 000000000000..7dbcf41750c1 --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c @@ -0,0 +1,385 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. 
*/ + +#include <linux/phylink.h> +#include <linux/iopoll.h> +#include <linux/pci.h> +#include <linux/phy.h> + +#include "../libwx/wx_type.h" +#include "../libwx/wx_lib.h" +#include "../libwx/wx_ptp.h" +#include "../libwx/wx_hw.h" +#include "../libwx/wx_sriov.h" +#include "txgbe_type.h" +#include "txgbe_aml.h" +#include "txgbe_hw.h" + +void txgbe_gpio_init_aml(struct wx *wx) +{ + u32 status; + + wr32(wx, WX_GPIO_INTTYPE_LEVEL, TXGBE_GPIOBIT_2 | TXGBE_GPIOBIT_3); + wr32(wx, WX_GPIO_INTEN, TXGBE_GPIOBIT_2 | TXGBE_GPIOBIT_3); + + status = rd32(wx, WX_GPIO_INTSTATUS); + for (int i = 0; i < 6; i++) { + if (status & BIT(i)) + wr32(wx, WX_GPIO_EOI, BIT(i)); + } +} + +irqreturn_t txgbe_gpio_irq_handler_aml(int irq, void *data) +{ + struct txgbe *txgbe = data; + struct wx *wx = txgbe->wx; + u32 status; + + wr32(wx, WX_GPIO_INTMASK, 0xFF); + status = rd32(wx, WX_GPIO_INTSTATUS); + if (status & TXGBE_GPIOBIT_2) { + set_bit(WX_FLAG_NEED_SFP_RESET, wx->flags); + wr32(wx, WX_GPIO_EOI, TXGBE_GPIOBIT_2); + wx_service_event_schedule(wx); + } + if (status & TXGBE_GPIOBIT_3) { + set_bit(WX_FLAG_NEED_LINK_CONFIG, wx->flags); + wx_service_event_schedule(wx); + wr32(wx, WX_GPIO_EOI, TXGBE_GPIOBIT_3); + } + + wr32(wx, WX_GPIO_INTMASK, 0); + return IRQ_HANDLED; +} + +int txgbe_test_hostif(struct wx *wx) +{ + struct txgbe_hic_ephy_getlink buffer; + + if (wx->mac.type != wx_mac_aml) + return 0; + + buffer.hdr.cmd = FW_PHY_GET_LINK_CMD; + buffer.hdr.buf_len = sizeof(struct txgbe_hic_ephy_getlink) - + sizeof(struct wx_hic_hdr); + buffer.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + + return wx_host_interface_command(wx, (u32 *)&buffer, sizeof(buffer), + WX_HI_COMMAND_TIMEOUT, true); +} + +static int txgbe_identify_sfp_hostif(struct wx *wx, struct txgbe_hic_i2c_read *buffer) +{ + buffer->hdr.cmd = FW_READ_SFP_INFO_CMD; + buffer->hdr.buf_len = sizeof(struct txgbe_hic_i2c_read) - + sizeof(struct wx_hic_hdr); + buffer->hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + + return wx_host_interface_command(wx, (u32 *)buffer, + sizeof(struct txgbe_hic_i2c_read), + WX_HI_COMMAND_TIMEOUT, true); +} + +static int txgbe_set_phy_link_hostif(struct wx *wx, int speed, int autoneg, int duplex) +{ + struct txgbe_hic_ephy_setlink buffer; + + buffer.hdr.cmd = FW_PHY_SET_LINK_CMD; + buffer.hdr.buf_len = sizeof(struct txgbe_hic_ephy_setlink) - + sizeof(struct wx_hic_hdr); + buffer.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + + switch (speed) { + case SPEED_25000: + buffer.speed = TXGBE_LINK_SPEED_25GB_FULL; + break; + case SPEED_10000: + buffer.speed = TXGBE_LINK_SPEED_10GB_FULL; + break; + } + + buffer.fec_mode = TXGBE_PHY_FEC_AUTO; + buffer.autoneg = autoneg; + buffer.duplex = duplex; + + return wx_host_interface_command(wx, (u32 *)&buffer, sizeof(buffer), + WX_HI_COMMAND_TIMEOUT, true); +} + +static void txgbe_get_link_capabilities(struct wx *wx) +{ + struct txgbe *txgbe = wx->priv; + + if (test_bit(PHY_INTERFACE_MODE_25GBASER, txgbe->sfp_interfaces)) + wx->adv_speed = SPEED_25000; + else if (test_bit(PHY_INTERFACE_MODE_10GBASER, txgbe->sfp_interfaces)) + wx->adv_speed = SPEED_10000; + else + wx->adv_speed = SPEED_UNKNOWN; + + wx->adv_duplex = wx->adv_speed == SPEED_UNKNOWN ? 
+ DUPLEX_HALF : DUPLEX_FULL; +} + +static void txgbe_get_phy_link(struct wx *wx, int *speed) +{ + u32 status; + + status = rd32(wx, TXGBE_CFG_PORT_ST); + if (!(status & TXGBE_CFG_PORT_ST_LINK_UP)) + *speed = SPEED_UNKNOWN; + else if (status & TXGBE_CFG_PORT_ST_LINK_AML_25G) + *speed = SPEED_25000; + else if (status & TXGBE_CFG_PORT_ST_LINK_AML_10G) + *speed = SPEED_10000; + else + *speed = SPEED_UNKNOWN; +} + +int txgbe_set_phy_link(struct wx *wx) +{ + int speed, err; + u32 gpio; + + /* Check RX signal */ + gpio = rd32(wx, WX_GPIO_EXT); + if (gpio & TXGBE_GPIOBIT_3) + return -ENODEV; + + txgbe_get_link_capabilities(wx); + if (wx->adv_speed == SPEED_UNKNOWN) + return -ENODEV; + + txgbe_get_phy_link(wx, &speed); + if (speed == wx->adv_speed) + return 0; + + err = txgbe_set_phy_link_hostif(wx, wx->adv_speed, 0, wx->adv_duplex); + if (err) { + wx_err(wx, "Failed to setup link\n"); + return err; + } + + return 0; +} + +static int txgbe_sfp_to_linkmodes(struct wx *wx, struct txgbe_sfp_id *id) +{ + __ETHTOOL_DECLARE_LINK_MODE_MASK(modes) = { 0, }; + DECLARE_PHY_INTERFACE_MASK(interfaces); + struct txgbe *txgbe = wx->priv; + + if (id->com_25g_code & (TXGBE_SFF_25GBASESR_CAPABLE | + TXGBE_SFF_25GBASEER_CAPABLE | + TXGBE_SFF_25GBASELR_CAPABLE)) { + phylink_set(modes, 25000baseSR_Full); + __set_bit(PHY_INTERFACE_MODE_25GBASER, interfaces); + } + if (id->com_10g_code & TXGBE_SFF_10GBASESR_CAPABLE) { + phylink_set(modes, 10000baseSR_Full); + __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces); + } + if (id->com_10g_code & TXGBE_SFF_10GBASELR_CAPABLE) { + phylink_set(modes, 10000baseLR_Full); + __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces); + } + + if (phy_interface_empty(interfaces)) { + wx_err(wx, "unsupported SFP module\n"); + return -EINVAL; + } + + phylink_set(modes, Pause); + phylink_set(modes, Asym_Pause); + phylink_set(modes, FIBRE); + txgbe->link_port = PORT_FIBRE; + + if (!linkmode_equal(txgbe->sfp_support, modes)) { + linkmode_copy(txgbe->sfp_support, modes); + phy_interface_and(txgbe->sfp_interfaces, + wx->phylink_config.supported_interfaces, + interfaces); + linkmode_copy(txgbe->advertising, modes); + + set_bit(WX_FLAG_NEED_LINK_CONFIG, wx->flags); + } + + return 0; +} + +int txgbe_identify_sfp(struct wx *wx) +{ + struct txgbe_hic_i2c_read buffer; + struct txgbe_sfp_id *id; + int err = 0; + u32 gpio; + + gpio = rd32(wx, WX_GPIO_EXT); + if (gpio & TXGBE_GPIOBIT_2) + return -ENODEV; + + err = txgbe_identify_sfp_hostif(wx, &buffer); + if (err) { + wx_err(wx, "Failed to identify SFP module\n"); + return err; + } + + id = &buffer.id; + if (id->identifier != TXGBE_SFF_IDENTIFIER_SFP) { + wx_err(wx, "Invalid SFP module\n"); + return -ENODEV; + } + + err = txgbe_sfp_to_linkmodes(wx, id); + if (err) + return err; + + if (gpio & TXGBE_GPIOBIT_3) + set_bit(WX_FLAG_NEED_LINK_CONFIG, wx->flags); + + return 0; +} + +void txgbe_setup_link(struct wx *wx) +{ + struct txgbe *txgbe = wx->priv; + + phy_interface_zero(txgbe->sfp_interfaces); + linkmode_zero(txgbe->sfp_support); + + txgbe_identify_sfp(wx); +} + +static void txgbe_get_link_state(struct phylink_config *config, + struct phylink_link_state *state) +{ + struct wx *wx = phylink_to_wx(config); + int speed; + + txgbe_get_phy_link(wx, &speed); + state->link = speed != SPEED_UNKNOWN; + state->speed = speed; + state->duplex = state->link ? 
DUPLEX_FULL : DUPLEX_UNKNOWN; +} + +static void txgbe_reconfig_mac(struct wx *wx) +{ + u32 wdg, fc; + + wdg = rd32(wx, WX_MAC_WDG_TIMEOUT); + fc = rd32(wx, WX_MAC_RX_FLOW_CTRL); + + wr32(wx, WX_MIS_RST, TXGBE_MIS_RST_MAC_RST(wx->bus.func)); + /* wait for MAC reset complete */ + usleep_range(1000, 1500); + + wr32m(wx, TXGBE_MAC_MISC_CTL, TXGBE_MAC_MISC_CTL_LINK_STS_MOD, + TXGBE_MAC_MISC_CTL_LINK_BOTH); + wx_reset_mac(wx); + + wr32(wx, WX_MAC_WDG_TIMEOUT, wdg); + wr32(wx, WX_MAC_RX_FLOW_CTRL, fc); +} + +static void txgbe_mac_link_up_aml(struct phylink_config *config, + struct phy_device *phy, + unsigned int mode, + phy_interface_t interface, + int speed, int duplex, + bool tx_pause, bool rx_pause) +{ + struct wx *wx = phylink_to_wx(config); + u32 txcfg; + + wx_fc_enable(wx, tx_pause, rx_pause); + + txgbe_reconfig_mac(wx); + + txcfg = rd32(wx, TXGBE_AML_MAC_TX_CFG); + txcfg &= ~TXGBE_AML_MAC_TX_CFG_SPEED_MASK; + + switch (speed) { + case SPEED_25000: + txcfg |= TXGBE_AML_MAC_TX_CFG_SPEED_25G; + break; + case SPEED_10000: + txcfg |= TXGBE_AML_MAC_TX_CFG_SPEED_10G; + break; + default: + break; + } + + wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_RE, WX_MAC_RX_CFG_RE); + wr32(wx, TXGBE_AML_MAC_TX_CFG, txcfg | TXGBE_AML_MAC_TX_CFG_TE); + + wx->speed = speed; + wx->last_rx_ptp_check = jiffies; + if (test_bit(WX_STATE_PTP_RUNNING, wx->state)) + wx_ptp_reset_cyclecounter(wx); + /* ping all the active vfs to let them know we are going up */ + wx_ping_all_vfs_with_link_status(wx, true); +} + +static void txgbe_mac_link_down_aml(struct phylink_config *config, + unsigned int mode, + phy_interface_t interface) +{ + struct wx *wx = phylink_to_wx(config); + + wr32m(wx, TXGBE_AML_MAC_TX_CFG, TXGBE_AML_MAC_TX_CFG_TE, 0); + wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_RE, 0); + + wx->speed = SPEED_UNKNOWN; + if (test_bit(WX_STATE_PTP_RUNNING, wx->state)) + wx_ptp_reset_cyclecounter(wx); + /* ping all the active vfs to let them know we are going down */ + wx_ping_all_vfs_with_link_status(wx, false); +} + +static void txgbe_mac_config_aml(struct phylink_config *config, unsigned int mode, + const struct phylink_link_state *state) +{ +} + +static const struct phylink_mac_ops txgbe_mac_ops_aml = { + .mac_config = txgbe_mac_config_aml, + .mac_link_down = txgbe_mac_link_down_aml, + .mac_link_up = txgbe_mac_link_up_aml, +}; + +int txgbe_phylink_init_aml(struct txgbe *txgbe) +{ + struct phylink_link_state state; + struct phylink_config *config; + struct wx *wx = txgbe->wx; + phy_interface_t phy_mode; + struct phylink *phylink; + int err; + + config = &wx->phylink_config; + config->dev = &wx->netdev->dev; + config->type = PHYLINK_NETDEV; + config->mac_capabilities = MAC_25000FD | MAC_10000FD | + MAC_SYM_PAUSE | MAC_ASYM_PAUSE; + config->get_fixed_state = txgbe_get_link_state; + + phy_mode = PHY_INTERFACE_MODE_25GBASER; + __set_bit(PHY_INTERFACE_MODE_25GBASER, config->supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_10GBASER, config->supported_interfaces); + + phylink = phylink_create(config, NULL, phy_mode, &txgbe_mac_ops_aml); + if (IS_ERR(phylink)) + return PTR_ERR(phylink); + + state.speed = SPEED_25000; + state.duplex = DUPLEX_FULL; + err = phylink_set_fixed_link(phylink, &state); + if (err) { + wx_err(wx, "Failed to set fixed link\n"); + return err; + } + + wx->phylink = phylink; + + return 0; +} diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.h new file mode 100644 index 000000000000..25d4971ca0d9 --- /dev/null +++ 
b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */ + +#ifndef _TXGBE_AML_H_ +#define _TXGBE_AML_H_ + +void txgbe_gpio_init_aml(struct wx *wx); +irqreturn_t txgbe_gpio_irq_handler_aml(int irq, void *data); +int txgbe_test_hostif(struct wx *wx); +int txgbe_set_phy_link(struct wx *wx); +int txgbe_identify_sfp(struct wx *wx); +void txgbe_setup_link(struct wx *wx); +int txgbe_phylink_init_aml(struct txgbe *txgbe); + +#endif /* _TXGBE_AML_H_ */ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c index 78999d484f18..a4753402660e 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c @@ -12,6 +12,31 @@ #include "txgbe_fdir.h" #include "txgbe_ethtool.h" +int txgbe_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct wx *wx = netdev_priv(netdev); + struct txgbe *txgbe = wx->priv; + int err; + + if (wx->mac.type == wx_mac_aml40) + return -EOPNOTSUPP; + + err = wx_get_link_ksettings(netdev, cmd); + if (err) + return err; + + if (wx->mac.type == wx_mac_sp) + return 0; + + cmd->base.port = txgbe->link_port; + cmd->base.autoneg = AUTONEG_DISABLE; + linkmode_copy(cmd->link_modes.supported, txgbe->sfp_support); + linkmode_copy(cmd->link_modes.advertising, txgbe->advertising); + + return 0; +} + static int txgbe_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, @@ -342,12 +367,19 @@ static int txgbe_add_ethtool_fdir_entry(struct txgbe *txgbe, queue = TXGBE_RDB_FDIR_DROP_QUEUE; } else { u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie); + u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie); - if (ring >= wx->num_rx_queues) + if (!vf && ring >= wx->num_rx_queues) + return -EINVAL; + else if (vf && (vf > wx->num_vfs || + ring >= wx->num_rx_queues_per_pool)) return -EINVAL; /* Map the ring onto the absolute queue index */ - queue = wx->rx_ring[ring]->reg_idx; + if (!vf) + queue = wx->rx_ring[ring]->reg_idx; + else + queue = ((vf - 1) * wx->num_rx_queues_per_pool) + ring; } /* Don't allow indexes to exist outside of available space */ @@ -510,7 +542,7 @@ static const struct ethtool_ops txgbe_ethtool_ops = { .get_drvinfo = wx_get_drvinfo, .nway_reset = wx_nway_reset, .get_link = ethtool_op_get_link, - .get_link_ksettings = wx_get_link_ksettings, + .get_link_ksettings = txgbe_get_link_ksettings, .set_link_ksettings = wx_set_link_ksettings, .get_sset_count = wx_get_sset_count, .get_strings = wx_get_strings, diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h index ace1b3571012..66dbc8ec1bb6 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h @@ -4,6 +4,8 @@ #ifndef _TXGBE_ETHTOOL_H_ #define _TXGBE_ETHTOOL_H_ +int txgbe_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd); void txgbe_set_ethtool_ops(struct net_device *netdev); #endif /* _TXGBE_ETHTOOL_H_ */ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.c index ef50efbaec0f..a84010828551 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.c @@ -307,6 +307,7 @@ void txgbe_atr(struct wx_ring *ring, struct 
wx_tx_buffer *first, u8 ptype) int txgbe_fdir_set_input_mask(struct wx *wx, union txgbe_atr_input *input_mask) { u32 fdirm = 0, fdirtcpm = 0, flex = 0; + int index, offset; /* Program the relevant mask registers. If src/dst_port or src/dst_addr * are zero, then assume a full mask for that field. Also assume that @@ -352,15 +353,17 @@ int txgbe_fdir_set_input_mask(struct wx *wx, union txgbe_atr_input *input_mask) /* Now mask VM pool and destination IPv6 - bits 5 and 2 */ wr32(wx, TXGBE_RDB_FDIR_OTHER_MSK, fdirm); - flex = rd32(wx, TXGBE_RDB_FDIR_FLEX_CFG(0)); - flex &= ~TXGBE_RDB_FDIR_FLEX_CFG_FIELD0; + index = VMDQ_P(0) / 4; + offset = VMDQ_P(0) % 4; + flex = rd32(wx, TXGBE_RDB_FDIR_FLEX_CFG(index)); + flex &= ~(TXGBE_RDB_FDIR_FLEX_CFG_FIELD0 << (offset * 8)); flex |= (TXGBE_RDB_FDIR_FLEX_CFG_BASE_MAC | - TXGBE_RDB_FDIR_FLEX_CFG_OFST(0x6)); + TXGBE_RDB_FDIR_FLEX_CFG_OFST(0x6)) << (offset * 8); switch ((__force u16)input_mask->formatted.flex_bytes & 0xFFFF) { case 0x0000: /* Mask Flex Bytes */ - flex |= TXGBE_RDB_FDIR_FLEX_CFG_MSK; + flex |= TXGBE_RDB_FDIR_FLEX_CFG_MSK << (offset * 8); break; case 0xFFFF: break; @@ -368,7 +371,7 @@ int txgbe_fdir_set_input_mask(struct wx *wx, union txgbe_atr_input *input_mask) wx_err(wx, "Error on flexible byte mask\n"); return -EINVAL; } - wr32(wx, TXGBE_RDB_FDIR_FLEX_CFG(0), flex); + wr32(wx, TXGBE_RDB_FDIR_FLEX_CFG(index), flex); /* store the TCP/UDP port masks, bit reversed from port layout */ fdirtcpm = ntohs(input_mask->formatted.dst_port); @@ -516,14 +519,16 @@ static void txgbe_fdir_enable(struct wx *wx, u32 fdirctrl) static void txgbe_init_fdir_signature(struct wx *wx) { u32 fdirctrl = TXGBE_FDIR_PBALLOC_64K; + int index = VMDQ_P(0) / 4; + int offset = VMDQ_P(0) % 4; u32 flex = 0; - flex = rd32(wx, TXGBE_RDB_FDIR_FLEX_CFG(0)); - flex &= ~TXGBE_RDB_FDIR_FLEX_CFG_FIELD0; + flex = rd32(wx, TXGBE_RDB_FDIR_FLEX_CFG(index)); + flex &= ~(TXGBE_RDB_FDIR_FLEX_CFG_FIELD0 << (offset * 8)); flex |= (TXGBE_RDB_FDIR_FLEX_CFG_BASE_MAC | - TXGBE_RDB_FDIR_FLEX_CFG_OFST(0x6)); - wr32(wx, TXGBE_RDB_FDIR_FLEX_CFG(0), flex); + TXGBE_RDB_FDIR_FLEX_CFG_OFST(0x6)) << (offset * 8); + wr32(wx, TXGBE_RDB_FDIR_FLEX_CFG(index), flex); /* Continue setup of fdirctrl register bits: * Move the flexible bytes to use the ethertype - shift 6 words diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c index a054b259d435..e551ae0e2069 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c @@ -188,7 +188,7 @@ int txgbe_reset_hw(struct wx *wx) if (status != 0) return status; - if (wx->media_type != sp_media_copper) { + if (wx->media_type != wx_media_copper) { u32 val; val = WX_MIS_RST_LAN_RST(wx->bus.func); @@ -218,7 +218,7 @@ int txgbe_reset_hw(struct wx *wx) * clear the multicast table. Also reset num_rar_entries to 128, * since we modify this value when programming the SAN MAC address. 
*/ - wx->mac.num_rar_entries = TXGBE_SP_RAR_ENTRIES; + wx->mac.num_rar_entries = TXGBE_RAR_ENTRIES; wx_init_rx_addrs(wx); pci_set_master(wx->pdev); diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c index 8658a51ee810..20b9a28bcb55 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c @@ -6,10 +6,13 @@ #include "../libwx/wx_type.h" #include "../libwx/wx_lib.h" +#include "../libwx/wx_ptp.h" #include "../libwx/wx_hw.h" +#include "../libwx/wx_sriov.h" #include "txgbe_type.h" #include "txgbe_phy.h" #include "txgbe_irq.h" +#include "txgbe_aml.h" /** * txgbe_irq_enable - Enable default interrupt generation settings @@ -18,7 +21,14 @@ **/ void txgbe_irq_enable(struct wx *wx, bool queues) { - wr32(wx, WX_PX_MISC_IEN, TXGBE_PX_MISC_IEN_MASK); + u32 misc_ien = TXGBE_PX_MISC_IEN_MASK; + + if (wx->mac.type == wx_mac_aml) { + misc_ien |= TXGBE_PX_MISC_GPIO; + txgbe_gpio_init_aml(wx); + } + + wr32(wx, WX_PX_MISC_IEN, misc_ien); /* unmask interrupt */ wx_intr_enable(wx, TXGBE_INTR_MISC); @@ -80,6 +90,14 @@ static int txgbe_request_link_irq(struct txgbe *txgbe) IRQF_ONESHOT, "txgbe-link-irq", txgbe); } +static int txgbe_request_gpio_irq(struct txgbe *txgbe) +{ + txgbe->gpio_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_GPIO); + return request_threaded_irq(txgbe->gpio_irq, NULL, + txgbe_gpio_irq_handler_aml, + IRQF_ONESHOT, "txgbe-gpio-irq", txgbe); +} + static const struct irq_chip txgbe_irq_chip = { .name = "txgbe-misc-irq", }; @@ -109,8 +127,15 @@ static irqreturn_t txgbe_misc_irq_handle(int irq, void *data) struct wx *wx = txgbe->wx; u32 eicr; - if (wx->pdev->msix_enabled) + if (wx->pdev->msix_enabled) { + eicr = wx_misc_isb(wx, WX_ISB_MISC); + txgbe->eicr = eicr; + if (eicr & TXGBE_PX_MISC_IC_VF_MBOX) { + wx_msg_task(txgbe->wx); + wx_intr_enable(wx, TXGBE_INTR_MISC); + } return IRQ_WAKE_THREAD; + } eicr = wx_misc_isb(wx, WX_ISB_VEC0); if (!eicr) { @@ -129,6 +154,8 @@ static irqreturn_t txgbe_misc_irq_handle(int irq, void *data) q_vector = wx->q_vector[0]; napi_schedule_irqoff(&q_vector->napi); + txgbe->eicr = wx_misc_isb(wx, WX_ISB_MISC); + return IRQ_WAKE_THREAD; } @@ -140,13 +167,22 @@ static irqreturn_t txgbe_misc_irq_thread_fn(int irq, void *data) unsigned int sub_irq; u32 eicr; - eicr = wx_misc_isb(wx, WX_ISB_MISC); + eicr = txgbe->eicr; if (eicr & (TXGBE_PX_MISC_ETH_LK | TXGBE_PX_MISC_ETH_LKDN | TXGBE_PX_MISC_ETH_AN)) { sub_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_LINK); handle_nested_irq(sub_irq); nhandled++; } + if (eicr & TXGBE_PX_MISC_GPIO) { + sub_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_GPIO); + handle_nested_irq(sub_irq); + nhandled++; + } + if (unlikely(eicr & TXGBE_PX_MISC_IC_TIMESYNC)) { + wx_ptp_check_pps_event(wx); + nhandled++; + } wx_intr_enable(wx, TXGBE_INTR_MISC); return (nhandled > 0 ? 
IRQ_HANDLED : IRQ_NONE); @@ -166,9 +202,12 @@ static void txgbe_del_irq_domain(struct txgbe *txgbe) void txgbe_free_misc_irq(struct txgbe *txgbe) { - if (txgbe->wx->mac.type == wx_mac_aml) + if (txgbe->wx->mac.type == wx_mac_aml40) return; + if (txgbe->wx->mac.type == wx_mac_aml) + free_irq(txgbe->gpio_irq, txgbe); + free_irq(txgbe->link_irq, txgbe); free_irq(txgbe->misc.irq, txgbe); txgbe_del_irq_domain(txgbe); @@ -180,12 +219,12 @@ int txgbe_setup_misc_irq(struct txgbe *txgbe) struct wx *wx = txgbe->wx; int hwirq, err; - if (wx->mac.type == wx_mac_aml) + if (wx->mac.type == wx_mac_aml40) goto skip_sp_irq; - txgbe->misc.nirqs = 1; - txgbe->misc.domain = irq_domain_add_simple(NULL, txgbe->misc.nirqs, 0, - &txgbe_misc_irq_domain_ops, txgbe); + txgbe->misc.nirqs = TXGBE_IRQ_MAX; + txgbe->misc.domain = irq_domain_create_simple(NULL, txgbe->misc.nirqs, 0, + &txgbe_misc_irq_domain_ops, txgbe); if (!txgbe->misc.domain) return -ENOMEM; @@ -212,11 +251,20 @@ int txgbe_setup_misc_irq(struct txgbe *txgbe) if (err) goto free_msic_irq; + if (wx->mac.type == wx_mac_sp) + goto skip_sp_irq; + + err = txgbe_request_gpio_irq(txgbe); + if (err) + goto free_link_irq; + skip_sp_irq: wx->misc_irq_domain = true; return 0; +free_link_irq: + free_irq(txgbe->link_irq, txgbe); free_msic_irq: free_irq(txgbe->misc.irq, txgbe); del_misc_irq: diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c index 38206a46693b..f3d2778b8e35 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c @@ -8,6 +8,7 @@ #include <linux/string.h> #include <linux/etherdevice.h> #include <linux/phylink.h> +#include <net/udp_tunnel.h> #include <net/ip.h> #include <linux/if_vlan.h> @@ -15,9 +16,12 @@ #include "../libwx/wx_lib.h" #include "../libwx/wx_ptp.h" #include "../libwx/wx_hw.h" +#include "../libwx/wx_mbx.h" +#include "../libwx/wx_sriov.h" #include "txgbe_type.h" #include "txgbe_hw.h" #include "txgbe_phy.h" +#include "txgbe_aml.h" #include "txgbe_irq.h" #include "txgbe_fdir.h" #include "txgbe_ethtool.h" @@ -85,9 +89,62 @@ static int txgbe_enumerate_functions(struct wx *wx) return physfns; } +static void txgbe_sfp_detection_subtask(struct wx *wx) +{ + int err; + + if (!test_bit(WX_FLAG_NEED_SFP_RESET, wx->flags)) + return; + + /* wait for SFP module ready */ + msleep(200); + + err = txgbe_identify_sfp(wx); + if (err) + return; + + clear_bit(WX_FLAG_NEED_SFP_RESET, wx->flags); +} + +static void txgbe_link_config_subtask(struct wx *wx) +{ + int err; + + if (!test_bit(WX_FLAG_NEED_LINK_CONFIG, wx->flags)) + return; + + err = txgbe_set_phy_link(wx); + if (err) + return; + + clear_bit(WX_FLAG_NEED_LINK_CONFIG, wx->flags); +} + +/** + * txgbe_service_task - manages and runs subtasks + * @work: pointer to work_struct containing our data + **/ +static void txgbe_service_task(struct work_struct *work) +{ + struct wx *wx = container_of(work, struct wx, service_task); + + txgbe_sfp_detection_subtask(wx); + txgbe_link_config_subtask(wx); + + wx_service_event_complete(wx); +} + +static void txgbe_init_service(struct wx *wx) +{ + timer_setup(&wx->service_timer, wx_service_timer, 0); + INIT_WORK(&wx->service_task, txgbe_service_task); + clear_bit(WX_STATE_SERVICE_SCHED, wx->state); +} + static void txgbe_up_complete(struct wx *wx) { struct net_device *netdev = wx->netdev; + u32 reg; wx_control_hw(wx, true); wx_configure_vectors(wx); @@ -96,17 +153,26 @@ static void txgbe_up_complete(struct wx *wx) smp_mb__before_atomic(); 
wx_napi_enable_all(wx); - if (wx->mac.type == wx_mac_aml) { - u32 reg; - + switch (wx->mac.type) { + case wx_mac_aml40: reg = rd32(wx, TXGBE_AML_MAC_TX_CFG); reg &= ~TXGBE_AML_MAC_TX_CFG_SPEED_MASK; - reg |= TXGBE_AML_MAC_TX_CFG_SPEED_25G; + reg |= TXGBE_AML_MAC_TX_CFG_SPEED_40G; wr32(wx, WX_MAC_TX_CFG, reg); txgbe_enable_sec_tx_path(wx); netif_carrier_on(wx->netdev); - } else { + break; + case wx_mac_aml: + /* Enable TX laser */ + wr32m(wx, WX_GPIO_DR, TXGBE_GPIOBIT_1, 0); + txgbe_setup_link(wx); + phylink_start(wx->phylink); + break; + case wx_mac_sp: phylink_start(wx->phylink); + break; + default: + break; } /* clear any pending interrupts, may auto mask */ @@ -117,6 +183,13 @@ static void txgbe_up_complete(struct wx *wx) /* enable transmits */ netif_tx_start_all_queues(netdev); + mod_timer(&wx->service_timer, jiffies); + + /* Set PF Reset Done bit so PF/VF Mail Ops can work */ + wr32m(wx, WX_CFG_PORT_CTL, WX_CFG_PORT_CTL_PFRSTD, + WX_CFG_PORT_CTL_PFRSTD); + /* update setting rx tx for all active vfs */ + wx_set_all_vfs(wx); } static void txgbe_reset(struct wx *wx) @@ -159,12 +232,24 @@ static void txgbe_disable_device(struct wx *wx) wx_irq_disable(wx); wx_napi_disable_all(wx); + timer_delete_sync(&wx->service_timer); + if (wx->bus.func < 2) wr32m(wx, TXGBE_MIS_PRB_CTL, TXGBE_MIS_PRB_CTL_LAN_UP(wx->bus.func), 0); else wx_err(wx, "%s: invalid bus lan id %d\n", __func__, wx->bus.func); + if (wx->num_vfs) { + /* Clear EITR Select mapping */ + wr32(wx, WX_PX_ITRSEL, 0); + /* Mark all the VFs as inactive */ + for (i = 0; i < wx->num_vfs; i++) + wx->vfinfo[i].clear_to_send = 0; + /* update setting rx tx for all active vfs */ + wx_set_all_vfs(wx); + } + if (!(((wx->subsystem_device_id & WX_NCSI_MASK) == WX_NCSI_SUP) || ((wx->subsystem_device_id & WX_WOL_MASK) == WX_WOL_SUP))) { /* disable mac transmiter */ @@ -188,10 +273,22 @@ void txgbe_down(struct wx *wx) { txgbe_disable_device(wx); txgbe_reset(wx); - if (wx->mac.type == wx_mac_aml) + + switch (wx->mac.type) { + case wx_mac_aml40: netif_carrier_off(wx->netdev); - else + break; + case wx_mac_aml: + phylink_stop(wx->phylink); + /* Disable TX laser */ + wr32m(wx, WX_GPIO_DR, TXGBE_GPIOBIT_1, TXGBE_GPIOBIT_1); + break; + case wx_mac_sp: phylink_stop(wx->phylink); + break; + default: + break; + } wx_clean_all_tx_rings(wx); wx_clean_all_rx_rings(wx); @@ -221,9 +318,11 @@ static void txgbe_init_type_code(struct wx *wx) case TXGBE_DEV_ID_AML5110: case TXGBE_DEV_ID_AML5025: case TXGBE_DEV_ID_AML5125: + wx->mac.type = wx_mac_aml; + break; case TXGBE_DEV_ID_AML5040: case TXGBE_DEV_ID_AML5140: - wx->mac.type = wx_mac_aml; + wx->mac.type = wx_mac_aml40; break; default: wx->mac.type = wx_mac_unknown; @@ -232,25 +331,25 @@ static void txgbe_init_type_code(struct wx *wx) switch (device_type) { case TXGBE_ID_SFP: - wx->media_type = sp_media_fiber; + wx->media_type = wx_media_fiber; break; case TXGBE_ID_XAUI: case TXGBE_ID_SGMII: - wx->media_type = sp_media_copper; + wx->media_type = wx_media_copper; break; case TXGBE_ID_KR_KX_KX4: case TXGBE_ID_MAC_XAUI: case TXGBE_ID_MAC_SGMII: - wx->media_type = sp_media_backplane; + wx->media_type = wx_media_backplane; break; case TXGBE_ID_SFI_XAUI: if (wx->bus.func == 0) - wx->media_type = sp_media_fiber; + wx->media_type = wx_media_fiber; else - wx->media_type = sp_media_copper; + wx->media_type = wx_media_copper; break; default: - wx->media_type = sp_media_unknown; + wx->media_type = wx_media_unknown; break; } } @@ -264,13 +363,13 @@ static int txgbe_sw_init(struct wx *wx) u16 msix_count = 0; int err; - 
wx->mac.num_rar_entries = TXGBE_SP_RAR_ENTRIES; - wx->mac.max_tx_queues = TXGBE_SP_MAX_TX_QUEUES; - wx->mac.max_rx_queues = TXGBE_SP_MAX_RX_QUEUES; - wx->mac.mcft_size = TXGBE_SP_MC_TBL_SIZE; - wx->mac.vft_size = TXGBE_SP_VFT_TBL_SIZE; - wx->mac.rx_pb_size = TXGBE_SP_RX_PB_SIZE; - wx->mac.tx_pb_size = TXGBE_SP_TDB_PB_SZ; + wx->mac.num_rar_entries = TXGBE_RAR_ENTRIES; + wx->mac.max_tx_queues = TXGBE_MAX_TXQ; + wx->mac.max_rx_queues = TXGBE_MAX_RXQ; + wx->mac.mcft_size = TXGBE_MC_TBL_SIZE; + wx->mac.vft_size = TXGBE_VFT_TBL_SIZE; + wx->mac.rx_pb_size = TXGBE_RX_PB_SIZE; + wx->mac.tx_pb_size = TXGBE_TDB_PB_SZ; /* PCI config space info */ err = wx_sw_init(wx); @@ -299,6 +398,7 @@ static int txgbe_sw_init(struct wx *wx) wx->configure_fdir = txgbe_configure_fdir; set_bit(WX_FLAG_RSC_CAPABLE, wx->flags); + set_bit(WX_FLAG_MULTI_64_FUNC, wx->flags); /* enable itr by default in dynamic mode */ wx->rx_itr_setting = 1; @@ -307,17 +407,21 @@ static int txgbe_sw_init(struct wx *wx) /* set default ring sizes */ wx->tx_ring_count = TXGBE_DEFAULT_TXD; wx->rx_ring_count = TXGBE_DEFAULT_RXD; + wx->mbx.size = WX_VXMAILBOX_SIZE; /* set default work limits */ wx->tx_work_limit = TXGBE_DEFAULT_TX_WORK; wx->rx_work_limit = TXGBE_DEFAULT_RX_WORK; + wx->setup_tc = txgbe_setup_tc; wx->do_reset = txgbe_do_reset; + set_bit(0, &wx->fwd_bitmask); switch (wx->mac.type) { case wx_mac_sp: break; case wx_mac_aml: + case wx_mac_aml40: set_bit(WX_FLAG_SWFW_RING, wx->flags); wx->swfw_index = 0; break; @@ -516,6 +620,39 @@ void txgbe_do_reset(struct net_device *netdev) txgbe_reset(wx); } +static int txgbe_udp_tunnel_sync(struct net_device *dev, unsigned int table) +{ + struct wx *wx = netdev_priv(dev); + struct udp_tunnel_info ti; + + udp_tunnel_nic_get_port(dev, table, 0, &ti); + switch (ti.type) { + case UDP_TUNNEL_TYPE_VXLAN: + wr32(wx, TXGBE_CFG_VXLAN, ntohs(ti.port)); + break; + case UDP_TUNNEL_TYPE_VXLAN_GPE: + wr32(wx, TXGBE_CFG_VXLAN_GPE, ntohs(ti.port)); + break; + case UDP_TUNNEL_TYPE_GENEVE: + wr32(wx, TXGBE_CFG_GENEVE, ntohs(ti.port)); + break; + default: + break; + } + + return 0; +} + +static const struct udp_tunnel_nic_info txgbe_udp_tunnels = { + .sync_table = txgbe_udp_tunnel_sync, + .flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY, + .tables = { + { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, + { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN_GPE, }, + { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, }, + }, +}; + static const struct net_device_ops txgbe_netdev_ops = { .ndo_open = txgbe_open, .ndo_stop = txgbe_close, @@ -524,6 +661,7 @@ static const struct net_device_ops txgbe_netdev_ops = { .ndo_set_rx_mode = wx_set_rx_mode, .ndo_set_features = wx_set_features, .ndo_fix_features = wx_fix_features, + .ndo_features_check = wx_features_check, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = wx_set_mac, .ndo_get_stats64 = wx_get_stats64, @@ -604,9 +742,14 @@ static int txgbe_probe(struct pci_dev *pdev, goto err_pci_release_regions; } + /* The sapphire supports up to 63 VFs per pf, but physical + * function also need one pool for basic networking. 
+ */ + pci_sriov_set_totalvfs(pdev, TXGBE_MAX_VFS_DRV_LIMIT); wx->driver_name = txgbe_driver_name; txgbe_set_ethtool_ops(netdev); netdev->netdev_ops = &txgbe_netdev_ops; + netdev->udp_tunnel_nic_info = &txgbe_udp_tunnels; /* setup the private structure */ err = txgbe_sw_init(wx); @@ -652,6 +795,7 @@ static int txgbe_probe(struct pci_dev *pdev, netdev->features |= NETIF_F_HIGHDMA; netdev->hw_features |= NETIF_F_GRO; netdev->features |= NETIF_F_GRO; + netdev->features |= NETIF_F_RX_UDP_TUNNEL_PORT; netdev->priv_flags |= IFF_UNICAST_FLT; netdev->priv_flags |= IFF_SUPP_NOFCS; @@ -673,9 +817,11 @@ static int txgbe_probe(struct pci_dev *pdev, eth_hw_addr_set(netdev, wx->mac.perm_addr); wx_mac_set_default_filter(wx, wx->mac.perm_addr); + txgbe_init_service(wx); + err = wx_init_interrupt_scheme(wx); if (err) - goto err_free_mac_table; + goto err_cancel_service; /* Save off EEPROM version number and Option Rom version which * together make a unique identify for the eeprom @@ -718,6 +864,13 @@ static int txgbe_probe(struct pci_dev *pdev, if (etrack_id < 0x20010) dev_warn(&pdev->dev, "Please upgrade the firmware to 0x20010 or above.\n"); + err = txgbe_test_hostif(wx); + if (err != 0) { + dev_err(&pdev->dev, "Mismatched Firmware version\n"); + err = -EIO; + goto err_release_hw; + } + txgbe = devm_kzalloc(&pdev->dev, sizeof(*txgbe), GFP_KERNEL); if (!txgbe) { err = -ENOMEM; @@ -768,6 +921,9 @@ err_free_misc_irq: err_release_hw: wx_clear_interrupt_scheme(wx); wx_control_hw(wx, false); +err_cancel_service: + timer_delete_sync(&wx->service_timer); + cancel_work_sync(&wx->service_task); err_free_mac_table: kfree(wx->rss_key); kfree(wx->mac_table); @@ -794,7 +950,10 @@ static void txgbe_remove(struct pci_dev *pdev) struct txgbe *txgbe = wx->priv; struct net_device *netdev; + cancel_work_sync(&wx->service_task); + netdev = wx->netdev; + wx_disable_sriov(wx); unregister_netdev(netdev); txgbe_remove_phy(txgbe); @@ -817,11 +976,12 @@ static struct pci_driver txgbe_driver = { .probe = txgbe_probe, .remove = txgbe_remove, .shutdown = txgbe_shutdown, + .sriov_configure = wx_pci_sriov_configure, }; module_pci_driver(txgbe_driver); MODULE_DEVICE_TABLE(pci, txgbe_pci_tbl); MODULE_AUTHOR("Beijing WangXun Technology Co., Ltd, <software@trustnetic.com>"); -MODULE_DESCRIPTION("WangXun(R) 10 Gigabit PCI Express Network Driver"); +MODULE_DESCRIPTION("WangXun(R) 10/25/40 Gigabit PCI Express Network Driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c index 85f022ceef4f..03f1b9bc604d 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c @@ -16,8 +16,11 @@ #include "../libwx/wx_type.h" #include "../libwx/wx_lib.h" #include "../libwx/wx_ptp.h" +#include "../libwx/wx_sriov.h" +#include "../libwx/wx_mbx.h" #include "../libwx/wx_hw.h" #include "txgbe_type.h" +#include "txgbe_aml.h" #include "txgbe_phy.h" #include "txgbe_hw.h" @@ -163,7 +166,7 @@ static struct phylink_pcs *txgbe_phylink_mac_select(struct phylink_config *confi struct wx *wx = phylink_to_wx(config); struct txgbe *txgbe = wx->priv; - if (wx->media_type != sp_media_copper) + if (wx->media_type != wx_media_copper) return txgbe->pcs; return NULL; @@ -184,6 +187,8 @@ static void txgbe_mac_link_down(struct phylink_config *config, wx->speed = SPEED_UNKNOWN; if (test_bit(WX_STATE_PTP_RUNNING, wx->state)) wx_ptp_reset_cyclecounter(wx); + /* ping all the active vfs to let them know we are going down */ + 
wx_ping_all_vfs_with_link_status(wx, false); } static void txgbe_mac_link_up(struct phylink_config *config, @@ -225,6 +230,8 @@ static void txgbe_mac_link_up(struct phylink_config *config, wx->last_rx_ptp_check = jiffies; if (test_bit(WX_STATE_PTP_RUNNING, wx->state)) wx_ptp_reset_cyclecounter(wx); + /* ping all the active vfs to let them know we are going up */ + wx_ping_all_vfs_with_link_status(wx, true); } static int txgbe_mac_prepare(struct phylink_config *config, unsigned int mode, @@ -272,7 +279,7 @@ static int txgbe_phylink_init(struct txgbe *txgbe) config->mac_capabilities = MAC_10000FD | MAC_1000FD | MAC_100FD | MAC_SYM_PAUSE | MAC_ASYM_PAUSE; - if (wx->media_type == sp_media_copper) { + if (wx->media_type == wx_media_copper) { phy_mode = PHY_INTERFACE_MODE_XAUI; __set_bit(PHY_INTERFACE_MODE_XAUI, config->supported_interfaces); } else { @@ -312,7 +319,10 @@ irqreturn_t txgbe_link_irq_handler(int irq, void *data) status = rd32(wx, TXGBE_CFG_PORT_ST); up = !!(status & TXGBE_CFG_PORT_ST_LINK_UP); - phylink_pcs_change(txgbe->pcs, up); + if (txgbe->pcs) + phylink_pcs_change(txgbe->pcs, up); + else + phylink_mac_change(wx->phylink, up); return IRQ_HANDLED; } @@ -567,11 +577,18 @@ int txgbe_init_phy(struct txgbe *txgbe) struct wx *wx = txgbe->wx; int ret; - if (wx->mac.type == wx_mac_aml) + switch (wx->mac.type) { + case wx_mac_aml40: return 0; - - if (txgbe->wx->media_type == sp_media_copper) - return txgbe_ext_phy_init(txgbe); + case wx_mac_aml: + return txgbe_phylink_init_aml(txgbe); + case wx_mac_sp: + if (wx->media_type == wx_media_copper) + return txgbe_ext_phy_init(txgbe); + break; + default: + break; + } ret = txgbe_swnodes_register(txgbe); if (ret) { @@ -634,13 +651,21 @@ err_unregister_swnode: void txgbe_remove_phy(struct txgbe *txgbe) { - if (txgbe->wx->mac.type == wx_mac_aml) + switch (txgbe->wx->mac.type) { + case wx_mac_aml40: return; - - if (txgbe->wx->media_type == sp_media_copper) { - phylink_disconnect_phy(txgbe->wx->phylink); + case wx_mac_aml: phylink_destroy(txgbe->wx->phylink); return; + case wx_mac_sp: + if (txgbe->wx->media_type == wx_media_copper) { + phylink_disconnect_phy(txgbe->wx->phylink); + phylink_destroy(txgbe->wx->phylink); + return; + } + break; + default: + break; } platform_device_unregister(txgbe->sfp_dev); diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h index 3938985355ed..a32b19d71ea2 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h @@ -8,4 +8,4 @@ irqreturn_t txgbe_link_irq_handler(int irq, void *data); int txgbe_init_phy(struct txgbe *txgbe); void txgbe_remove_phy(struct txgbe *txgbe); -#endif /* _TXGBE_NODE_H_ */ +#endif /* _TXGBE_PHY_H_ */ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h index f423012dec22..42ec815159e8 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h @@ -6,6 +6,8 @@ #include <linux/property.h> #include <linux/irq.h> +#include <linux/phy.h> +#include "../libwx/wx_type.h" /* Device IDs */ #define TXGBE_DEV_ID_SP1000 0x1001 @@ -50,6 +52,8 @@ /**************** SP Registers ****************************/ /* chip control Registers */ +#define TXGBE_MIS_RST 0x1000C +#define TXGBE_MIS_RST_MAC_RST(_i) BIT(20 - (_i) * 3) #define TXGBE_MIS_PRB_CTL 0x10010 #define TXGBE_MIS_PRB_CTL_LAN_UP(_i) BIT(1 - (_i)) /* FMGR Registers */ @@ -62,6 +66,11 @@ #define TXGBE_TS_CTL 0x10300 #define 
TXGBE_TS_CTL_EVAL_MD BIT(31) +/* MAC Misc Registers */ +#define TXGBE_MAC_MISC_CTL 0x11F00 +#define TXGBE_MAC_MISC_CTL_LINK_STS_MOD BIT(0) +#define TXGBE_MAC_MISC_CTL_LINK_PCS FIELD_PREP(BIT(0), 0) +#define TXGBE_MAC_MISC_CTL_LINK_BOTH FIELD_PREP(BIT(0), 1) /* GPIO register bit */ #define TXGBE_GPIOBIT_0 BIT(0) /* I:tx fault */ #define TXGBE_GPIOBIT_1 BIT(1) /* O:tx disabled */ @@ -73,19 +82,27 @@ /* Extended Interrupt Enable Set */ #define TXGBE_PX_MISC_ETH_LKDN BIT(8) #define TXGBE_PX_MISC_DEV_RST BIT(10) +#define TXGBE_PX_MISC_IC_TIMESYNC BIT(11) #define TXGBE_PX_MISC_ETH_EVENT BIT(17) #define TXGBE_PX_MISC_ETH_LK BIT(18) #define TXGBE_PX_MISC_ETH_AN BIT(19) #define TXGBE_PX_MISC_INT_ERR BIT(20) +#define TXGBE_PX_MISC_IC_VF_MBOX BIT(23) #define TXGBE_PX_MISC_GPIO BIT(26) #define TXGBE_PX_MISC_IEN_MASK \ (TXGBE_PX_MISC_ETH_LKDN | TXGBE_PX_MISC_DEV_RST | \ TXGBE_PX_MISC_ETH_EVENT | TXGBE_PX_MISC_ETH_LK | \ - TXGBE_PX_MISC_ETH_AN | TXGBE_PX_MISC_INT_ERR) + TXGBE_PX_MISC_ETH_AN | TXGBE_PX_MISC_INT_ERR | \ + TXGBE_PX_MISC_IC_VF_MBOX | TXGBE_PX_MISC_IC_TIMESYNC) /* Port cfg registers */ #define TXGBE_CFG_PORT_ST 0x14404 #define TXGBE_CFG_PORT_ST_LINK_UP BIT(0) +#define TXGBE_CFG_PORT_ST_LINK_AML_25G BIT(3) +#define TXGBE_CFG_PORT_ST_LINK_AML_10G BIT(4) +#define TXGBE_CFG_VXLAN 0x14410 +#define TXGBE_CFG_VXLAN_GPE 0x14414 +#define TXGBE_CFG_GENEVE 0x14418 /* I2C registers */ #define TXGBE_I2C_BASE 0x14900 @@ -146,8 +163,11 @@ /*************************** Amber Lite Registers ****************************/ #define TXGBE_PX_PF_BME 0x4B8 #define TXGBE_AML_MAC_TX_CFG 0x11000 +#define TXGBE_AML_MAC_TX_CFG_TE BIT(0) #define TXGBE_AML_MAC_TX_CFG_SPEED_MASK GENMASK(30, 27) -#define TXGBE_AML_MAC_TX_CFG_SPEED_25G BIT(28) +#define TXGBE_AML_MAC_TX_CFG_SPEED_40G FIELD_PREP(GENMASK(30, 27), 0) +#define TXGBE_AML_MAC_TX_CFG_SPEED_25G FIELD_PREP(GENMASK(30, 27), 2) +#define TXGBE_AML_MAC_TX_CFG_SPEED_10G FIELD_PREP(GENMASK(30, 27), 8) #define TXGBE_RDM_RSC_CTL 0x1200C #define TXGBE_RDM_RSC_CTL_FREE_CTL BIT(7) @@ -168,13 +188,15 @@ #define TXGBE_MAX_RX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1) #define TXGBE_MAX_TX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1) -#define TXGBE_SP_MAX_TX_QUEUES 128 -#define TXGBE_SP_MAX_RX_QUEUES 128 -#define TXGBE_SP_RAR_ENTRIES 128 -#define TXGBE_SP_MC_TBL_SIZE 128 -#define TXGBE_SP_VFT_TBL_SIZE 128 -#define TXGBE_SP_RX_PB_SIZE 512 -#define TXGBE_SP_TDB_PB_SZ (160 * 1024) /* 160KB Packet Buffer */ +#define TXGBE_MAX_TXQ 128 +#define TXGBE_MAX_RXQ 128 +#define TXGBE_RAR_ENTRIES 128 +#define TXGBE_MC_TBL_SIZE 128 +#define TXGBE_VFT_TBL_SIZE 128 +#define TXGBE_RX_PB_SIZE 512 +#define TXGBE_TDB_PB_SZ (160 * 1024) /* 160KB Packet Buffer */ + +#define TXGBE_MAX_VFS_DRV_LIMIT 63 #define TXGBE_DEFAULT_ATR_SAMPLE_RATE 20 @@ -265,7 +287,7 @@ struct txgbe_fdir_filter { struct hlist_node fdir_node; union txgbe_atr_input filter; u16 sw_idx; - u16 action; + u64 action; }; /* TX/RX descriptor defines */ @@ -292,6 +314,72 @@ void txgbe_up(struct wx *wx); int txgbe_setup_tc(struct net_device *dev, u8 tc); void txgbe_do_reset(struct net_device *netdev); +#define TXGBE_LINK_SPEED_10GB_FULL 4 +#define TXGBE_LINK_SPEED_25GB_FULL 0x10 + +#define TXGBE_SFF_IDENTIFIER_SFP 0x3 +#define TXGBE_SFF_DA_PASSIVE_CABLE 0x4 +#define TXGBE_SFF_DA_ACTIVE_CABLE 0x8 +#define TXGBE_SFF_DA_SPEC_ACTIVE_LIMIT 0x4 +#define TXGBE_SFF_FCPI4_LIMITING 0x3 +#define TXGBE_SFF_10GBASESR_CAPABLE 0x10 +#define TXGBE_SFF_10GBASELR_CAPABLE 0x20 +#define TXGBE_SFF_25GBASESR_CAPABLE 0x2 +#define TXGBE_SFF_25GBASELR_CAPABLE 0x3 +#define 
TXGBE_SFF_25GBASEER_CAPABLE 0x4 +#define TXGBE_SFF_25GBASECR_91FEC 0xB +#define TXGBE_SFF_25GBASECR_74FEC 0xC +#define TXGBE_SFF_25GBASECR_NOFEC 0xD + +#define TXGBE_PHY_FEC_RS BIT(0) +#define TXGBE_PHY_FEC_BASER BIT(1) +#define TXGBE_PHY_FEC_OFF BIT(2) +#define TXGBE_PHY_FEC_AUTO (TXGBE_PHY_FEC_OFF | \ + TXGBE_PHY_FEC_BASER |\ + TXGBE_PHY_FEC_RS) + +#define FW_PHY_GET_LINK_CMD 0xC0 +#define FW_PHY_SET_LINK_CMD 0xC1 +#define FW_READ_SFP_INFO_CMD 0xC5 + +struct txgbe_sfp_id { + u8 identifier; /* A0H 0x00 */ + u8 com_1g_code; /* A0H 0x06 */ + u8 com_10g_code; /* A0H 0x03 */ + u8 com_25g_code; /* A0H 0x24 */ + u8 cable_spec; /* A0H 0x3C */ + u8 cable_tech; /* A0H 0x08 */ + u8 vendor_oui0; /* A0H 0x25 */ + u8 vendor_oui1; /* A0H 0x26 */ + u8 vendor_oui2; /* A0H 0x27 */ + u8 reserved[3]; +}; + +struct txgbe_hic_i2c_read { + struct wx_hic_hdr hdr; + struct txgbe_sfp_id id; +}; + +struct txgbe_hic_ephy_setlink { + struct wx_hic_hdr hdr; + u8 speed; + u8 duplex; + u8 autoneg; + u8 fec_mode; + u8 resv[4]; +}; + +struct txgbe_hic_ephy_getlink { + struct wx_hic_hdr hdr; + u8 speed; + u8 duplex; + u8 autoneg; + u8 flow_ctl; + u8 power; + u8 fec_mode; + u8 resv[6]; +}; + #define NODE_PROP(_NAME, _PROP) \ (const struct software_node) { \ .name = _NAME, \ @@ -329,6 +417,7 @@ struct txgbe_nodes { enum txgbe_misc_irqs { TXGBE_IRQ_LINK = 0, + TXGBE_IRQ_GPIO, TXGBE_IRQ_MAX }; @@ -350,12 +439,19 @@ struct txgbe { struct clk *clk; struct gpio_chip *gpio; unsigned int link_irq; + unsigned int gpio_irq; + u32 eicr; /* flow director */ struct hlist_head fdir_filter_list; union txgbe_atr_input fdir_mask; int fdir_filter_count; spinlock_t fdir_perfect_lock; /* spinlock for FDIR */ + + DECLARE_PHY_INTERFACE_MASK(sfp_interfaces); + __ETHTOOL_DECLARE_LINK_MODE_MASK(sfp_support); + __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); + u8 link_port; }; #endif /* _TXGBE_TYPE_H_ */ diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 054abf283ab3..6011d7eae0c7 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -880,7 +880,7 @@ static void axienet_dma_tx_cb(void *data, const struct dmaengine_result *result) dev_consume_skb_any(skbuf_dma->skb); netif_txq_completed_wake(txq, 1, len, CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX), - 2 * MAX_SKB_FRAGS); + 2); } /** @@ -914,7 +914,7 @@ axienet_start_xmit_dmaengine(struct sk_buff *skb, struct net_device *ndev) dma_dev = lp->tx_chan->device; sg_len = skb_shinfo(skb)->nr_frags + 1; - if (CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX) <= sg_len) { + if (CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX) <= 1) { netif_stop_queue(ndev); if (net_ratelimit()) netdev_warn(ndev, "TX ring unexpectedly full\n"); @@ -964,7 +964,7 @@ axienet_start_xmit_dmaengine(struct sk_buff *skb, struct net_device *ndev) txq = skb_get_tx_queue(lp->ndev, skb); netdev_tx_sent_queue(txq, skb->len); netif_txq_maybe_stop(txq, CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX), - MAX_SKB_FRAGS + 1, 2 * MAX_SKB_FRAGS); + 1, 2); dmaengine_submit(dma_tx_desc); dma_async_issue_pending(lp->tx_chan); @@ -2980,7 +2980,7 @@ static int axienet_probe(struct platform_device *pdev) } } if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) { - dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit archecture\n"); + dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit architecture\n"); 
ret = -EINVAL; goto cleanup_clk; } diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c index a2ab1c150822..e1e7f65553e7 100644 --- a/drivers/net/ethernet/xscale/ixp4xx_eth.c +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c @@ -394,16 +394,20 @@ static void ixp_tx_timestamp(struct port *port, struct sk_buff *skb) __raw_writel(TX_SNAPSHOT_LOCKED, ®s->channel[ch].ch_event); } -static int hwtstamp_set(struct net_device *netdev, struct ifreq *ifr) +static int ixp4xx_hwtstamp_set(struct net_device *netdev, + struct kernel_hwtstamp_config *cfg, + struct netlink_ext_ack *extack) { - struct hwtstamp_config cfg; struct ixp46x_ts_regs *regs; struct port *port = netdev_priv(netdev); int ret; int ch; - if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) - return -EFAULT; + if (!cpu_is_ixp46x()) + return -EOPNOTSUPP; + + if (!netif_running(netdev)) + return -EINVAL; ret = ixp46x_ptp_find(&port->timesync_regs, &port->phc_index); if (ret) @@ -412,10 +416,10 @@ static int hwtstamp_set(struct net_device *netdev, struct ifreq *ifr) ch = PORT2CHANNEL(port); regs = port->timesync_regs; - if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON) + if (cfg->tx_type != HWTSTAMP_TX_OFF && cfg->tx_type != HWTSTAMP_TX_ON) return -ERANGE; - switch (cfg.rx_filter) { + switch (cfg->rx_filter) { case HWTSTAMP_FILTER_NONE: port->hwts_rx_en = 0; break; @@ -431,39 +435,45 @@ static int hwtstamp_set(struct net_device *netdev, struct ifreq *ifr) return -ERANGE; } - port->hwts_tx_en = cfg.tx_type == HWTSTAMP_TX_ON; + port->hwts_tx_en = cfg->tx_type == HWTSTAMP_TX_ON; /* Clear out any old time stamps. */ __raw_writel(TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED, ®s->channel[ch].ch_event); - return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; + return 0; } -static int hwtstamp_get(struct net_device *netdev, struct ifreq *ifr) +static int ixp4xx_hwtstamp_get(struct net_device *netdev, + struct kernel_hwtstamp_config *cfg) { - struct hwtstamp_config cfg; struct port *port = netdev_priv(netdev); - cfg.flags = 0; - cfg.tx_type = port->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; + if (!cpu_is_ixp46x()) + return -EOPNOTSUPP; + + if (!netif_running(netdev)) + return -EINVAL; + + cfg->flags = 0; + cfg->tx_type = port->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; switch (port->hwts_rx_en) { case 0: - cfg.rx_filter = HWTSTAMP_FILTER_NONE; + cfg->rx_filter = HWTSTAMP_FILTER_NONE; break; case PTP_SLAVE_MODE: - cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC; + cfg->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC; break; case PTP_MASTER_MODE: - cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ; + cfg->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ; break; default: WARN_ON_ONCE(1); return -ERANGE; } - return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? 
-EFAULT : 0; + return 0; } static int ixp4xx_mdio_cmd(struct mii_bus *bus, int phy_id, int location, @@ -985,21 +995,6 @@ static void eth_set_mcast_list(struct net_device *dev) } -static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd) -{ - if (!netif_running(dev)) - return -EINVAL; - - if (cpu_is_ixp46x()) { - if (cmd == SIOCSHWTSTAMP) - return hwtstamp_set(dev, req); - if (cmd == SIOCGHWTSTAMP) - return hwtstamp_get(dev, req); - } - - return phy_mii_ioctl(dev->phydev, req, cmd); -} - /* ethtool support */ static void ixp4xx_get_drvinfo(struct net_device *dev, @@ -1433,9 +1428,11 @@ static const struct net_device_ops ixp4xx_netdev_ops = { .ndo_change_mtu = ixp4xx_eth_change_mtu, .ndo_start_xmit = eth_xmit, .ndo_set_rx_mode = eth_set_mcast_list, - .ndo_eth_ioctl = eth_ioctl, + .ndo_eth_ioctl = phy_do_ioctl_running, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, + .ndo_hwtstamp_get = ixp4xx_hwtstamp_get, + .ndo_hwtstamp_set = ixp4xx_hwtstamp_set, }; static struct eth_plat_info *ixp4xx_of_get_platdata(struct device *dev) |
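
The ixp4xx_eth.c hunk above replaces the legacy SIOCSHWTSTAMP/SIOCGHWTSTAMP ioctl handling with the dedicated ndo_hwtstamp_get/ndo_hwtstamp_set callbacks. As a rough illustration of that pattern only (not the driver's actual code — the foo_* names and private struct below are hypothetical), a minimal driver-side implementation looks roughly like this:

#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/netlink.h>

struct foo_priv {
	bool hwts_tx_en;
	bool hwts_rx_en;
};

static int foo_hwtstamp_set(struct net_device *ndev,
			    struct kernel_hwtstamp_config *cfg,
			    struct netlink_ext_ack *extack)
{
	struct foo_priv *priv = netdev_priv(ndev);

	/* Reject timestamping modes the hardware cannot honour. */
	if (cfg->tx_type != HWTSTAMP_TX_OFF && cfg->tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	switch (cfg->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		priv->hwts_rx_en = false;
		break;
	case HWTSTAMP_FILTER_ALL:
		priv->hwts_rx_en = true;
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack, "unsupported rx filter");
		return -ERANGE;
	}

	priv->hwts_tx_en = cfg->tx_type == HWTSTAMP_TX_ON;

	/* No copy_to_user() here: the core copies the config back to user space. */
	return 0;
}

static int foo_hwtstamp_get(struct net_device *ndev,
			    struct kernel_hwtstamp_config *cfg)
{
	struct foo_priv *priv = netdev_priv(ndev);

	cfg->tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	cfg->rx_filter = priv->hwts_rx_en ? HWTSTAMP_FILTER_ALL :
					    HWTSTAMP_FILTER_NONE;
	return 0;
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_hwtstamp_get = foo_hwtstamp_get,
	.ndo_hwtstamp_set = foo_hwtstamp_set,
};

With these callbacks in place, the generic dev_ioctl() path performs the user-space copies of struct hwtstamp_config itself, which is why the converted ixp4xx handlers in the hunk above can drop copy_from_user()/copy_to_user() and simply return 0, and why .ndo_eth_ioctl can fall back to phy_do_ioctl_running for the remaining PHY ioctls.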