Diffstat (limited to 'drivers/net/ethernet/meta')
-rw-r--r--  drivers/net/ethernet/meta/Kconfig                |   1
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic.h          |  14
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_csr.h      |  36
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_devlink.c  | 258
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c  | 178
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_fw.c       | 500
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_fw.h       |  56
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c | 335
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h |  48
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_irq.c      | 142
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_mac.c      |  13
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_netdev.c   |  52
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_pci.c      |  24
13 files changed, 1474 insertions(+), 183 deletions(-)
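Much of the new code below wires devlink firmware flashing through a PLDM image handler; fbnic_pldm_match_record() refuses images whose vendor-defined "AntiRollbackVer" descriptor carries a version older than the one the running firmware reports. A minimal sketch of that descriptor's layout, inferred from the offsets and constants in the fbnic_devlink.c hunk below (the struct and field names here are illustrative, not part of the patch):

/* Illustrative only -- not from the patch. Layout implied by the checks
 * in fbnic_pldm_match_record(): desc->size must be at least 21 bytes,
 * data[0] == 1, data[1] == 15, "AntiRollbackVer" at offset 2, and a
 * little-endian u32 version read at offset 17.
 */
#include <stdint.h>

struct fbnic_anti_rollback_desc {
	uint8_t title_type;	/* checked against 1 in the patch */
	uint8_t title_len;	/* checked against 15 */
	char    title[15];	/* "AntiRollbackVer", no terminating NUL */
	uint8_t version[4];	/* little-endian u32, compared against
				 * fw_cap.anti_rollback_version
				 */
} __attribute__((packed));

The 21-byte minimum size in the patch matches this layout: two header bytes, the 15-byte name, and a 4-byte version.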
diff --git a/drivers/net/ethernet/meta/Kconfig b/drivers/net/ethernet/meta/Kconfig index 831921b9d4d5..3ba527514f1e 100644 --- a/drivers/net/ethernet/meta/Kconfig +++ b/drivers/net/ethernet/meta/Kconfig @@ -27,6 +27,7 @@ config FBNIC select NET_DEVLINK select PAGE_POOL select PHYLINK + select PLDMFW help This driver supports Meta Platforms Host Network Interface. diff --git a/drivers/net/ethernet/meta/fbnic/fbnic.h b/drivers/net/ethernet/meta/fbnic/fbnic.h index 4ca7b99ef131..65815d4f379e 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic.h +++ b/drivers/net/ethernet/meta/fbnic/fbnic.h @@ -19,6 +19,7 @@ struct fbnic_napi_vector; #define FBNIC_MAX_NAPI_VECTORS 128u +#define FBNIC_MBX_CMPL_SLOTS 4 struct fbnic_dev { struct device *dev; @@ -42,7 +43,7 @@ struct fbnic_dev { struct fbnic_fw_mbx mbx[FBNIC_IPC_MBX_INDICES]; struct fbnic_fw_cap fw_cap; - struct fbnic_fw_completion *cmpl_data; + struct fbnic_fw_completion *cmpl_data[FBNIC_MBX_CMPL_SLOTS]; /* Lock protecting Tx Mailbox queue to prevent possible races */ spinlock_t fw_tx_lock; @@ -81,6 +82,9 @@ struct fbnic_dev { /* Local copy of hardware statistics */ struct fbnic_hw_stats hw_stats; + + /* Lock protecting access to hw_stats */ + spinlock_t hw_stats_lock; }; /* Reserve entry 0 in the MSI-X "others" array until we have filled all @@ -154,14 +158,14 @@ struct fbnic_dev *fbnic_devlink_alloc(struct pci_dev *pdev); void fbnic_devlink_register(struct fbnic_dev *fbd); void fbnic_devlink_unregister(struct fbnic_dev *fbd); -int fbnic_fw_enable_mbx(struct fbnic_dev *fbd); -void fbnic_fw_disable_mbx(struct fbnic_dev *fbd); +int fbnic_fw_request_mbx(struct fbnic_dev *fbd); +void fbnic_fw_free_mbx(struct fbnic_dev *fbd); void fbnic_hwmon_register(struct fbnic_dev *fbd); void fbnic_hwmon_unregister(struct fbnic_dev *fbd); -int fbnic_pcs_irq_enable(struct fbnic_dev *fbd); -void fbnic_pcs_irq_disable(struct fbnic_dev *fbd); +int fbnic_pcs_request_irq(struct fbnic_dev *fbd); +void fbnic_pcs_free_irq(struct fbnic_dev *fbd); void fbnic_napi_name_irqs(struct fbnic_dev *fbd); int fbnic_napi_request_irq(struct fbnic_dev *fbd, diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h index 3b12a0ab5906..36393a17d92d 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h +++ b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h @@ -397,6 +397,15 @@ enum { #define FBNIC_TCE_DROP_CTRL_TTI_FRM_DROP_EN CSR_BIT(1) #define FBNIC_TCE_DROP_CTRL_TTI_TBI_DROP_EN CSR_BIT(2) +#define FBNIC_TCE_TTI_CM_DROP_PKTS 0x0403e /* 0x100f8 */ +#define FBNIC_TCE_TTI_CM_DROP_BYTE_L 0x0403f /* 0x100fc */ +#define FBNIC_TCE_TTI_CM_DROP_BYTE_H 0x04040 /* 0x10100 */ +#define FBNIC_TCE_TTI_FRAME_DROP_PKTS 0x04041 /* 0x10104 */ +#define FBNIC_TCE_TTI_FRAME_DROP_BYTE_L 0x04042 /* 0x10108 */ +#define FBNIC_TCE_TTI_FRAME_DROP_BYTE_H 0x04043 /* 0x1010c */ +#define FBNIC_TCE_TBI_DROP_PKTS 0x04044 /* 0x10110 */ +#define FBNIC_TCE_TBI_DROP_BYTE_L 0x04045 /* 0x10114 */ + #define FBNIC_TCE_TCAM_IDX2DEST_MAP 0x0404A /* 0x10128 */ #define FBNIC_TCE_TCAM_IDX2DEST_MAP_DEST_ID_0 CSR_GENMASK(3, 0) enum { @@ -432,6 +441,11 @@ enum { #define FBNIC_TMI_SOP_PROT_CTRL 0x04400 /* 0x11000 */ #define FBNIC_TMI_DROP_CTRL 0x04401 /* 0x11004 */ #define FBNIC_TMI_DROP_CTRL_EN CSR_BIT(0) +#define FBNIC_TMI_DROP_PKTS 0x04402 /* 0x11008 */ +#define FBNIC_TMI_DROP_BYTE_L 0x04403 /* 0x1100c */ +#define FBNIC_TMI_ILLEGAL_PTP_REQS 0x04409 /* 0x11024 */ +#define FBNIC_TMI_GOOD_PTP_TS 0x0440a /* 0x11028 */ +#define FBNIC_TMI_BAD_PTP_TS 0x0440b /* 0x1102c */ #define FBNIC_CSR_END_TMI 
0x0443f /* CSR section delimiter */ /* Precision Time Protocol Registers */ @@ -485,6 +499,14 @@ enum { FBNIC_RXB_FIFO_INDICES = 8 }; +enum { + FBNIC_RXB_INTF_NET = 0, + FBNIC_RXB_INTF_RBT = 1, + /* Unused */ + /* Unused */ + FBNIC_RXB_INTF_INDICES = 4 +}; + #define FBNIC_RXB_CT_SIZE(n) (0x08000 + (n)) /* 0x20000 + 4*n */ #define FBNIC_RXB_CT_SIZE_CNT 8 #define FBNIC_RXB_CT_SIZE_HEADER CSR_GENMASK(5, 0) @@ -796,8 +818,10 @@ enum { /* PUL User Registers */ #define FBNIC_CSR_START_PUL_USER 0x31000 /* CSR section delimiter */ #define FBNIC_PUL_OB_TLP_HDR_AW_CFG 0x3103d /* 0xc40f4 */ +#define FBNIC_PUL_OB_TLP_HDR_AW_CFG_FLUSH CSR_BIT(19) #define FBNIC_PUL_OB_TLP_HDR_AW_CFG_BME CSR_BIT(18) #define FBNIC_PUL_OB_TLP_HDR_AR_CFG 0x3103e /* 0xc40f8 */ +#define FBNIC_PUL_OB_TLP_HDR_AR_CFG_FLUSH CSR_BIT(19) #define FBNIC_PUL_OB_TLP_HDR_AR_CFG_BME CSR_BIT(18) #define FBNIC_PUL_USER_OB_RD_TLP_CNT_31_0 \ 0x3106e /* 0xc41b8 */ @@ -864,6 +888,12 @@ enum { #define FBNIC_QUEUE_TWQ1_BAL 0x022 /* 0x088 */ #define FBNIC_QUEUE_TWQ1_BAH 0x023 /* 0x08c */ +/* Tx Work Queue Statistics Registers */ +#define FBNIC_QUEUE_TWQ0_PKT_CNT 0x062 /* 0x188 */ +#define FBNIC_QUEUE_TWQ0_ERR_CNT 0x063 /* 0x18c */ +#define FBNIC_QUEUE_TWQ1_PKT_CNT 0x072 /* 0x1c8 */ +#define FBNIC_QUEUE_TWQ1_ERR_CNT 0x073 /* 0x1cc */ + /* Tx Completion Queue Registers */ #define FBNIC_QUEUE_TCQ_CTL 0x080 /* 0x200 */ #define FBNIC_QUEUE_TCQ_CTL_RESET CSR_BIT(0) @@ -953,6 +983,12 @@ enum { FBNIC_QUEUE_RDE_CTL1_PAYLD_PACK_RSS = 2, }; +/* Rx Per CQ Statistics Counters */ +#define FBNIC_QUEUE_RDE_PKT_CNT 0x2a2 /* 0xa88 */ +#define FBNIC_QUEUE_RDE_PKT_ERR_CNT 0x2a3 /* 0xa8c */ +#define FBNIC_QUEUE_RDE_CQ_DROP_CNT 0x2a4 /* 0xa90 */ +#define FBNIC_QUEUE_RDE_BDQ_DROP_CNT 0x2a5 /* 0xa94 */ + /* Rx Interrupt Manager Registers */ #define FBNIC_QUEUE_RIM_CTL 0x2c0 /* 0xb00 */ #define FBNIC_QUEUE_RIM_CTL_MSIX_MASK CSR_GENMASK(7, 0) diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c b/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c index 0072d612215e..4c4938eedd7b 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c @@ -3,10 +3,12 @@ #include <linux/unaligned.h> #include <linux/pci.h> +#include <linux/pldmfw.h> #include <linux/types.h> #include <net/devlink.h> #include "fbnic.h" +#include "fbnic_tlv.h" #define FBNIC_SN_STR_LEN 24 @@ -109,8 +111,262 @@ static int fbnic_devlink_info_get(struct devlink *devlink, return 0; } +static bool +fbnic_pldm_match_record(struct pldmfw *context, struct pldmfw_record *record) +{ + struct pldmfw_desc_tlv *desc; + u32 anti_rollback_ver = 0; + struct devlink *devlink; + struct fbnic_dev *fbd; + struct pci_dev *pdev; + + /* First, use the standard PCI matching function */ + if (!pldmfw_op_pci_match_record(context, record)) + return false; + + pdev = to_pci_dev(context->dev); + fbd = pci_get_drvdata(pdev); + devlink = priv_to_devlink(fbd); + + /* If PCI match is successful, check for vendor-specific descriptors */ + list_for_each_entry(desc, &record->descs, entry) { + if (desc->type != PLDM_DESC_ID_VENDOR_DEFINED) + continue; + + if (desc->size < 21 || desc->data[0] != 1 || + desc->data[1] != 15) + continue; + + if (memcmp(desc->data + 2, "AntiRollbackVer", 15) != 0) + continue; + + anti_rollback_ver = get_unaligned_le32(desc->data + 17); + break; + } + + /* Compare versions and return error if they do not match */ + if (anti_rollback_ver < fbd->fw_cap.anti_rollback_version) { + char buf[128]; + + snprintf(buf, sizeof(buf), + "New firmware anti-rollback version 
(0x%x) is older than device version (0x%x)!", + anti_rollback_ver, fbd->fw_cap.anti_rollback_version); + devlink_flash_update_status_notify(devlink, buf, + "Anti-Rollback", 0, 0); + + return false; + } + + return true; +} + +static int +fbnic_flash_start(struct fbnic_dev *fbd, struct pldmfw_component *component) +{ + struct fbnic_fw_completion *cmpl; + int err; + + cmpl = fbnic_fw_alloc_cmpl(FBNIC_TLV_MSG_ID_FW_START_UPGRADE_REQ); + if (!cmpl) + return -ENOMEM; + + err = fbnic_fw_xmit_fw_start_upgrade(fbd, cmpl, + component->identifier, + component->component_size); + if (err) + goto cmpl_free; + + /* Wait for firmware to ack firmware upgrade start */ + if (wait_for_completion_timeout(&cmpl->done, 10 * HZ)) + err = cmpl->result; + else + err = -ETIMEDOUT; + + fbnic_fw_clear_cmpl(fbd, cmpl); +cmpl_free: + fbnic_fw_put_cmpl(cmpl); + + return err; +} + +static int +fbnic_flash_component(struct pldmfw *context, + struct pldmfw_component *component) +{ + const u8 *data = component->component_data; + const u32 size = component->component_size; + struct fbnic_fw_completion *cmpl; + const char *component_name; + struct devlink *devlink; + struct fbnic_dev *fbd; + struct pci_dev *pdev; + u32 offset = 0; + u32 length = 0; + char buf[32]; + int err; + + pdev = to_pci_dev(context->dev); + fbd = pci_get_drvdata(pdev); + devlink = priv_to_devlink(fbd); + + switch (component->identifier) { + case QSPI_SECTION_CMRT: + component_name = "boot1"; + break; + case QSPI_SECTION_CONTROL_FW: + component_name = "boot2"; + break; + case QSPI_SECTION_OPTION_ROM: + component_name = "option-rom"; + break; + default: + snprintf(buf, sizeof(buf), "Unknown component ID %u!", + component->identifier); + devlink_flash_update_status_notify(devlink, buf, NULL, 0, + size); + return -EINVAL; + } + + /* Once firmware receives the request to start upgrading it responds + * with two messages: + * 1. An ACK that it received the message and possible error code + * indicating that an upgrade is not currently possible. + * 2. A request for the first chunk of data + * + * Setup completions for write before issuing the start message so + * the driver can catch both messages. + */ + cmpl = fbnic_fw_alloc_cmpl(FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_REQ); + if (!cmpl) + return -ENOMEM; + + err = fbnic_mbx_set_cmpl(fbd, cmpl); + if (err) + goto cmpl_free; + + devlink_flash_update_timeout_notify(devlink, "Initializing", + component_name, 15); + err = fbnic_flash_start(fbd, component); + if (err) + goto err_no_msg; + + while (offset < size) { + if (!wait_for_completion_timeout(&cmpl->done, 15 * HZ)) { + err = -ETIMEDOUT; + break; + } + + err = cmpl->result; + if (err) + break; + + /* Verify firmware is requesting the next chunk in the seq. */ + if (cmpl->u.fw_update.offset != offset + length) { + err = -EFAULT; + break; + } + + offset = cmpl->u.fw_update.offset; + length = cmpl->u.fw_update.length; + + if (length > TLV_MAX_DATA || offset + length > size) { + err = -EFAULT; + break; + } + + devlink_flash_update_status_notify(devlink, "Flashing", + component_name, + offset, size); + + /* Mailbox will set length to 0 once it receives the finish + * message. 
+ */ + if (!length) + continue; + + reinit_completion(&cmpl->done); + err = fbnic_fw_xmit_fw_write_chunk(fbd, data, offset, length, + 0); + if (err) + break; + } + + if (err) { + fbnic_fw_xmit_fw_write_chunk(fbd, NULL, 0, 0, err); +err_no_msg: + snprintf(buf, sizeof(buf), "Mailbox encountered error %d!", + err); + devlink_flash_update_status_notify(devlink, buf, + component_name, 0, 0); + } + + fbnic_fw_clear_cmpl(fbd, cmpl); +cmpl_free: + fbnic_fw_put_cmpl(cmpl); + + return err; +} + +static const struct pldmfw_ops fbnic_pldmfw_ops = { + .match_record = fbnic_pldm_match_record, + .flash_component = fbnic_flash_component, +}; + +static int +fbnic_devlink_flash_update(struct devlink *devlink, + struct devlink_flash_update_params *params, + struct netlink_ext_ack *extack) +{ + struct fbnic_dev *fbd = devlink_priv(devlink); + const struct firmware *fw = params->fw; + struct device *dev = fbd->dev; + struct pldmfw context; + char *err_msg; + int err; + + context.ops = &fbnic_pldmfw_ops; + context.dev = dev; + + err = pldmfw_flash_image(&context, fw); + if (err) { + switch (err) { + case -EINVAL: + err_msg = "Invalid image"; + break; + case -EOPNOTSUPP: + err_msg = "Unsupported image"; + break; + case -ENOMEM: + err_msg = "Out of memory"; + break; + case -EFAULT: + err_msg = "Invalid header"; + break; + case -ENOENT: + err_msg = "No matching record"; + break; + case -ENODEV: + err_msg = "No matching device"; + break; + case -ETIMEDOUT: + err_msg = "Timed out waiting for reply"; + break; + default: + err_msg = "Unknown error"; + break; + } + + NL_SET_ERR_MSG_FMT_MOD(extack, + "Failed to flash PLDM Image: %s (error: %d)", + err_msg, err); + } + + return err; +} + static const struct devlink_ops fbnic_devlink_ops = { - .info_get = fbnic_devlink_info_get, + .info_get = fbnic_devlink_info_get, + .flash_update = fbnic_devlink_flash_update, }; void fbnic_devlink_free(struct fbnic_dev *fbd) diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c index 0a751a2aaf73..5c7556c8c4c5 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c @@ -27,6 +27,19 @@ struct fbnic_stat { FBNIC_STAT_FIELDS(fbnic_hw_stats, name, stat) static const struct fbnic_stat fbnic_gstrings_hw_stats[] = { + /* TTI */ + FBNIC_HW_STAT("tti_cm_drop_frames", tti.cm_drop.frames), + FBNIC_HW_STAT("tti_cm_drop_bytes", tti.cm_drop.bytes), + FBNIC_HW_STAT("tti_frame_drop_frames", tti.frame_drop.frames), + FBNIC_HW_STAT("tti_frame_drop_bytes", tti.frame_drop.bytes), + FBNIC_HW_STAT("tti_tbi_drop_frames", tti.tbi_drop.frames), + FBNIC_HW_STAT("tti_tbi_drop_bytes", tti.tbi_drop.bytes), + + /* TMI */ + FBNIC_HW_STAT("ptp_illegal_req", tmi.ptp_illegal_req), + FBNIC_HW_STAT("ptp_good_ts", tmi.ptp_good_ts), + FBNIC_HW_STAT("ptp_bad_ts", tmi.ptp_bad_ts), + /* RPC */ FBNIC_HW_STAT("rpc_unkn_etype", rpc.unkn_etype), FBNIC_HW_STAT("rpc_unkn_ext_hdr", rpc.unkn_ext_hdr), @@ -39,7 +52,64 @@ static const struct fbnic_stat fbnic_gstrings_hw_stats[] = { }; #define FBNIC_HW_FIXED_STATS_LEN ARRAY_SIZE(fbnic_gstrings_hw_stats) -#define FBNIC_HW_STATS_LEN FBNIC_HW_FIXED_STATS_LEN + +#define FBNIC_RXB_ENQUEUE_STAT(name, stat) \ + FBNIC_STAT_FIELDS(fbnic_rxb_enqueue_stats, name, stat) + +static const struct fbnic_stat fbnic_gstrings_rxb_enqueue_stats[] = { + FBNIC_RXB_ENQUEUE_STAT("rxb_integrity_err%u", integrity_err), + FBNIC_RXB_ENQUEUE_STAT("rxb_mac_err%u", mac_err), + FBNIC_RXB_ENQUEUE_STAT("rxb_parser_err%u", parser_err), + 
FBNIC_RXB_ENQUEUE_STAT("rxb_frm_err%u", frm_err), + + FBNIC_RXB_ENQUEUE_STAT("rxb_drbo%u_frames", drbo.frames), + FBNIC_RXB_ENQUEUE_STAT("rxb_drbo%u_bytes", drbo.bytes), +}; + +#define FBNIC_HW_RXB_ENQUEUE_STATS_LEN \ + ARRAY_SIZE(fbnic_gstrings_rxb_enqueue_stats) + +#define FBNIC_RXB_FIFO_STAT(name, stat) \ + FBNIC_STAT_FIELDS(fbnic_rxb_fifo_stats, name, stat) + +static const struct fbnic_stat fbnic_gstrings_rxb_fifo_stats[] = { + FBNIC_RXB_FIFO_STAT("rxb_fifo%u_drop", trans_drop), + FBNIC_RXB_FIFO_STAT("rxb_fifo%u_dropped_frames", drop.frames), + FBNIC_RXB_FIFO_STAT("rxb_fifo%u_ecn", trans_ecn), + FBNIC_RXB_FIFO_STAT("rxb_fifo%u_level", level), +}; + +#define FBNIC_HW_RXB_FIFO_STATS_LEN ARRAY_SIZE(fbnic_gstrings_rxb_fifo_stats) + +#define FBNIC_RXB_DEQUEUE_STAT(name, stat) \ + FBNIC_STAT_FIELDS(fbnic_rxb_dequeue_stats, name, stat) + +static const struct fbnic_stat fbnic_gstrings_rxb_dequeue_stats[] = { + FBNIC_RXB_DEQUEUE_STAT("rxb_intf%u_frames", intf.frames), + FBNIC_RXB_DEQUEUE_STAT("rxb_intf%u_bytes", intf.bytes), + FBNIC_RXB_DEQUEUE_STAT("rxb_pbuf%u_frames", pbuf.frames), + FBNIC_RXB_DEQUEUE_STAT("rxb_pbuf%u_bytes", pbuf.bytes), +}; + +#define FBNIC_HW_RXB_DEQUEUE_STATS_LEN \ + ARRAY_SIZE(fbnic_gstrings_rxb_dequeue_stats) + +#define FBNIC_HW_Q_STAT(name, stat) \ + FBNIC_STAT_FIELDS(fbnic_hw_q_stats, name, stat.value) + +static const struct fbnic_stat fbnic_gstrings_hw_q_stats[] = { + FBNIC_HW_Q_STAT("rde_%u_pkt_err", rde_pkt_err), + FBNIC_HW_Q_STAT("rde_%u_pkt_cq_drop", rde_pkt_cq_drop), + FBNIC_HW_Q_STAT("rde_%u_pkt_bdq_drop", rde_pkt_bdq_drop), +}; + +#define FBNIC_HW_Q_STATS_LEN ARRAY_SIZE(fbnic_gstrings_hw_q_stats) +#define FBNIC_HW_STATS_LEN \ + (FBNIC_HW_FIXED_STATS_LEN + \ + FBNIC_HW_RXB_ENQUEUE_STATS_LEN * FBNIC_RXB_ENQUEUE_INDICES + \ + FBNIC_HW_RXB_FIFO_STATS_LEN * FBNIC_RXB_FIFO_INDICES + \ + FBNIC_HW_RXB_DEQUEUE_STATS_LEN * FBNIC_RXB_DEQUEUE_INDICES + \ + FBNIC_HW_Q_STATS_LEN * FBNIC_MAX_QUEUES) static void fbnic_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) @@ -298,31 +368,125 @@ err_free_clone: return err; } -static void fbnic_get_strings(struct net_device *dev, u32 sset, u8 *data) +static void fbnic_get_rxb_enqueue_strings(u8 **data, unsigned int idx) +{ + const struct fbnic_stat *stat; + int i; + + stat = fbnic_gstrings_rxb_enqueue_stats; + for (i = 0; i < FBNIC_HW_RXB_ENQUEUE_STATS_LEN; i++, stat++) + ethtool_sprintf(data, stat->string, idx); +} + +static void fbnic_get_rxb_fifo_strings(u8 **data, unsigned int idx) +{ + const struct fbnic_stat *stat; + int i; + + stat = fbnic_gstrings_rxb_fifo_stats; + for (i = 0; i < FBNIC_HW_RXB_FIFO_STATS_LEN; i++, stat++) + ethtool_sprintf(data, stat->string, idx); +} + +static void fbnic_get_rxb_dequeue_strings(u8 **data, unsigned int idx) { + const struct fbnic_stat *stat; int i; + stat = fbnic_gstrings_rxb_dequeue_stats; + for (i = 0; i < FBNIC_HW_RXB_DEQUEUE_STATS_LEN; i++, stat++) + ethtool_sprintf(data, stat->string, idx); +} + +static void fbnic_get_strings(struct net_device *dev, u32 sset, u8 *data) +{ + const struct fbnic_stat *stat; + int i, idx; + switch (sset) { case ETH_SS_STATS: - for (i = 0; i < FBNIC_HW_STATS_LEN; i++) + for (i = 0; i < FBNIC_HW_FIXED_STATS_LEN; i++) ethtool_puts(&data, fbnic_gstrings_hw_stats[i].string); + + for (i = 0; i < FBNIC_RXB_ENQUEUE_INDICES; i++) + fbnic_get_rxb_enqueue_strings(&data, i); + + for (i = 0; i < FBNIC_RXB_FIFO_INDICES; i++) + fbnic_get_rxb_fifo_strings(&data, i); + + for (i = 0; i < FBNIC_RXB_DEQUEUE_INDICES; i++) + 
fbnic_get_rxb_dequeue_strings(&data, i); + + for (idx = 0; idx < FBNIC_MAX_QUEUES; idx++) { + stat = fbnic_gstrings_hw_q_stats; + + for (i = 0; i < FBNIC_HW_Q_STATS_LEN; i++, stat++) + ethtool_sprintf(&data, stat->string, idx); + } break; } } +static void fbnic_report_hw_stats(const struct fbnic_stat *stat, + const void *base, int len, u64 **data) +{ + while (len--) { + u8 *curr = (u8 *)base + stat->offset; + + **data = *(u64 *)curr; + + stat++; + (*data)++; + } +} + static void fbnic_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { struct fbnic_net *fbn = netdev_priv(dev); - const struct fbnic_stat *stat; + struct fbnic_dev *fbd = fbn->fbd; int i; fbnic_get_hw_stats(fbn->fbd); - for (i = 0; i < FBNIC_HW_STATS_LEN; i++) { - stat = &fbnic_gstrings_hw_stats[i]; - data[i] = *(u64 *)((u8 *)&fbn->fbd->hw_stats + stat->offset); + spin_lock(&fbd->hw_stats_lock); + fbnic_report_hw_stats(fbnic_gstrings_hw_stats, &fbd->hw_stats, + FBNIC_HW_FIXED_STATS_LEN, &data); + + for (i = 0; i < FBNIC_RXB_ENQUEUE_INDICES; i++) { + const struct fbnic_rxb_enqueue_stats *enq; + + enq = &fbd->hw_stats.rxb.enq[i]; + fbnic_report_hw_stats(fbnic_gstrings_rxb_enqueue_stats, + enq, FBNIC_HW_RXB_ENQUEUE_STATS_LEN, + &data); + } + + for (i = 0; i < FBNIC_RXB_FIFO_INDICES; i++) { + const struct fbnic_rxb_fifo_stats *fifo; + + fifo = &fbd->hw_stats.rxb.fifo[i]; + fbnic_report_hw_stats(fbnic_gstrings_rxb_fifo_stats, + fifo, FBNIC_HW_RXB_FIFO_STATS_LEN, + &data); + } + + for (i = 0; i < FBNIC_RXB_DEQUEUE_INDICES; i++) { + const struct fbnic_rxb_dequeue_stats *deq; + + deq = &fbd->hw_stats.rxb.deq[i]; + fbnic_report_hw_stats(fbnic_gstrings_rxb_dequeue_stats, + deq, FBNIC_HW_RXB_DEQUEUE_STATS_LEN, + &data); + } + + for (i = 0; i < FBNIC_MAX_QUEUES; i++) { + const struct fbnic_hw_q_stats *hw_q = &fbd->hw_stats.hw_q[i]; + + fbnic_report_hw_stats(fbnic_gstrings_hw_q_stats, hw_q, + FBNIC_HW_Q_STATS_LEN, &data); } + spin_unlock(&fbd->hw_stats_lock); } static int fbnic_get_sset_count(struct net_device *dev, int sset) diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c index 88db3dacb940..e2368075ab8c 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c @@ -17,11 +17,29 @@ static void __fbnic_mbx_wr_desc(struct fbnic_dev *fbd, int mbx_idx, { u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx); + /* Write the upper 32b and then the lower 32b. Doing this the + * FW can then read lower, upper, lower to verify that the state + * of the descriptor wasn't changed mid-transaction. + */ fw_wr32(fbd, desc_offset + 1, upper_32_bits(desc)); fw_wrfl(fbd); fw_wr32(fbd, desc_offset, lower_32_bits(desc)); } +static void __fbnic_mbx_invalidate_desc(struct fbnic_dev *fbd, int mbx_idx, + int desc_idx, u32 desc) +{ + u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx); + + /* For initialization we write the lower 32b of the descriptor first. + * This way we can set the state to mark it invalid before we clear the + * upper 32b. 
+ */ + fw_wr32(fbd, desc_offset, desc); + fw_wrfl(fbd); + fw_wr32(fbd, desc_offset + 1, 0); +} + static u64 __fbnic_mbx_rd_desc(struct fbnic_dev *fbd, int mbx_idx, int desc_idx) { u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx); @@ -33,29 +51,41 @@ static u64 __fbnic_mbx_rd_desc(struct fbnic_dev *fbd, int mbx_idx, int desc_idx) return desc; } -static void fbnic_mbx_init_desc_ring(struct fbnic_dev *fbd, int mbx_idx) +static void fbnic_mbx_reset_desc_ring(struct fbnic_dev *fbd, int mbx_idx) { int desc_idx; + /* Disable DMA transactions from the device, + * and flush any transactions triggered during cleaning + */ + switch (mbx_idx) { + case FBNIC_IPC_MBX_RX_IDX: + wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AW_CFG, + FBNIC_PUL_OB_TLP_HDR_AW_CFG_FLUSH); + break; + case FBNIC_IPC_MBX_TX_IDX: + wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AR_CFG, + FBNIC_PUL_OB_TLP_HDR_AR_CFG_FLUSH); + break; + } + + wrfl(fbd); + /* Initialize first descriptor to all 0s. Doing this gives us a * solid stop for the firmware to hit when it is done looping * through the ring. */ - __fbnic_mbx_wr_desc(fbd, mbx_idx, 0, 0); - - fw_wrfl(fbd); + __fbnic_mbx_invalidate_desc(fbd, mbx_idx, 0, 0); /* We then fill the rest of the ring starting at the end and moving * back toward descriptor 0 with skip descriptors that have no * length nor address, and tell the firmware that they can skip * them and just move past them to the one we initialized to 0. */ - for (desc_idx = FBNIC_IPC_MBX_DESC_LEN; --desc_idx;) { - __fbnic_mbx_wr_desc(fbd, mbx_idx, desc_idx, - FBNIC_IPC_MBX_DESC_FW_CMPL | - FBNIC_IPC_MBX_DESC_HOST_CMPL); - fw_wrfl(fbd); - } + for (desc_idx = FBNIC_IPC_MBX_DESC_LEN; --desc_idx;) + __fbnic_mbx_invalidate_desc(fbd, mbx_idx, desc_idx, + FBNIC_IPC_MBX_DESC_FW_CMPL | + FBNIC_IPC_MBX_DESC_HOST_CMPL); } void fbnic_mbx_init(struct fbnic_dev *fbd) @@ -76,7 +106,7 @@ void fbnic_mbx_init(struct fbnic_dev *fbd) wr32(fbd, FBNIC_INTR_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY); for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++) - fbnic_mbx_init_desc_ring(fbd, i); + fbnic_mbx_reset_desc_ring(fbd, i); } static int fbnic_mbx_map_msg(struct fbnic_dev *fbd, int mbx_idx, @@ -141,7 +171,7 @@ static void fbnic_mbx_clean_desc_ring(struct fbnic_dev *fbd, int mbx_idx) { int i; - fbnic_mbx_init_desc_ring(fbd, mbx_idx); + fbnic_mbx_reset_desc_ring(fbd, mbx_idx); for (i = FBNIC_IPC_MBX_DESC_LEN; i--;) fbnic_mbx_unmap_and_free_msg(fbd, mbx_idx, i); @@ -207,6 +237,44 @@ static int fbnic_mbx_map_tlv_msg(struct fbnic_dev *fbd, return err; } +static int fbnic_mbx_set_cmpl_slot(struct fbnic_dev *fbd, + struct fbnic_fw_completion *cmpl_data) +{ + struct fbnic_fw_mbx *tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX]; + int free = -EXFULL; + int i; + + if (!tx_mbx->ready) + return -ENODEV; + + for (i = 0; i < FBNIC_MBX_CMPL_SLOTS; i++) { + if (!fbd->cmpl_data[i]) + free = i; + else if (fbd->cmpl_data[i]->msg_type == cmpl_data->msg_type) + return -EEXIST; + } + + if (free == -EXFULL) + return -EXFULL; + + fbd->cmpl_data[free] = cmpl_data; + + return 0; +} + +static void fbnic_mbx_clear_cmpl_slot(struct fbnic_dev *fbd, + struct fbnic_fw_completion *cmpl_data) +{ + int i; + + for (i = 0; i < FBNIC_MBX_CMPL_SLOTS; i++) { + if (fbd->cmpl_data[i] == cmpl_data) { + fbd->cmpl_data[i] = NULL; + break; + } + } +} + static void fbnic_mbx_process_tx_msgs(struct fbnic_dev *fbd) { struct fbnic_fw_mbx *tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX]; @@ -228,6 +296,19 @@ static void fbnic_mbx_process_tx_msgs(struct fbnic_dev *fbd) tx_mbx->head = head; } +int fbnic_mbx_set_cmpl(struct fbnic_dev *fbd, + struct 
fbnic_fw_completion *cmpl_data) +{ + unsigned long flags; + int err; + + spin_lock_irqsave(&fbd->fw_tx_lock, flags); + err = fbnic_mbx_set_cmpl_slot(fbd, cmpl_data); + spin_unlock_irqrestore(&fbd->fw_tx_lock, flags); + + return err; +} + static int fbnic_mbx_map_req_w_cmpl(struct fbnic_dev *fbd, struct fbnic_tlv_msg *msg, struct fbnic_fw_completion *cmpl_data) @@ -236,23 +317,20 @@ static int fbnic_mbx_map_req_w_cmpl(struct fbnic_dev *fbd, int err; spin_lock_irqsave(&fbd->fw_tx_lock, flags); - - /* If we are already waiting on a completion then abort */ - if (cmpl_data && fbd->cmpl_data) { - err = -EBUSY; - goto unlock_mbx; + if (cmpl_data) { + err = fbnic_mbx_set_cmpl_slot(fbd, cmpl_data); + if (err) + goto unlock_mbx; } - /* Record completion location and submit request */ - if (cmpl_data) - fbd->cmpl_data = cmpl_data; - err = fbnic_mbx_map_msg(fbd, FBNIC_IPC_MBX_TX_IDX, msg, le16_to_cpu(msg->hdr.len) * sizeof(u32), 1); - /* If msg failed then clear completion data for next caller */ + /* If we successfully reserved a completion and msg failed + * then clear completion data for next caller + */ if (err && cmpl_data) - fbd->cmpl_data = NULL; + fbnic_mbx_clear_cmpl_slot(fbd, cmpl_data); unlock_mbx: spin_unlock_irqrestore(&fbd->fw_tx_lock, flags); @@ -274,12 +352,18 @@ fbnic_fw_get_cmpl_by_type(struct fbnic_dev *fbd, u32 msg_type) { struct fbnic_fw_completion *cmpl_data = NULL; unsigned long flags; + int i; spin_lock_irqsave(&fbd->fw_tx_lock, flags); - if (fbd->cmpl_data && fbd->cmpl_data->msg_type == msg_type) { - cmpl_data = fbd->cmpl_data; - kref_get(&fbd->cmpl_data->ref_count); + for (i = 0; i < FBNIC_MBX_CMPL_SLOTS; i++) { + if (fbd->cmpl_data[i] && + fbd->cmpl_data[i]->msg_type == msg_type) { + cmpl_data = fbd->cmpl_data[i]; + kref_get(&cmpl_data->ref_count); + break; + } } + spin_unlock_irqrestore(&fbd->fw_tx_lock, flags); return cmpl_data; @@ -322,67 +406,41 @@ static int fbnic_fw_xmit_simple_msg(struct fbnic_dev *fbd, u32 msg_type) return err; } -/** - * fbnic_fw_xmit_cap_msg - Allocate and populate a FW capabilities message - * @fbd: FBNIC device structure - * - * Return: NULL on failure to allocate, error pointer on error, or pointer - * to new TLV test message. - * - * Sends a single TLV header indicating the host wants the firmware to - * confirm the capabilities and version. - **/ -static int fbnic_fw_xmit_cap_msg(struct fbnic_dev *fbd) -{ - int err = fbnic_fw_xmit_simple_msg(fbd, FBNIC_TLV_MSG_ID_HOST_CAP_REQ); - - /* Return 0 if we are not calling this on ASIC */ - return (err == -EOPNOTSUPP) ? 0 : err; -} - -static void fbnic_mbx_postinit_desc_ring(struct fbnic_dev *fbd, int mbx_idx) +static void fbnic_mbx_init_desc_ring(struct fbnic_dev *fbd, int mbx_idx) { struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx]; - /* This is a one time init, so just exit if it is completed */ - if (mbx->ready) - return; - mbx->ready = true; switch (mbx_idx) { case FBNIC_IPC_MBX_RX_IDX: + /* Enable DMA writes from the device */ + wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AW_CFG, + FBNIC_PUL_OB_TLP_HDR_AW_CFG_BME); + /* Make sure we have a page for the FW to write to */ fbnic_mbx_alloc_rx_msgs(fbd); break; case FBNIC_IPC_MBX_TX_IDX: - /* Force version to 1 if we successfully requested an update - * from the firmware. This should be overwritten once we get - * the actual version from the firmware in the capabilities - * request message. 
- */ - if (!fbnic_fw_xmit_cap_msg(fbd) && - !fbd->fw_cap.running.mgmt.version) - fbd->fw_cap.running.mgmt.version = 1; + /* Enable DMA reads from the device */ + wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AR_CFG, + FBNIC_PUL_OB_TLP_HDR_AR_CFG_BME); break; } } -static void fbnic_mbx_postinit(struct fbnic_dev *fbd) +static bool fbnic_mbx_event(struct fbnic_dev *fbd) { - int i; - - /* We only need to do this on the first interrupt following init. + /* We only need to do this on the first interrupt following reset. * this primes the mailbox so that we will have cleared all the * skip descriptors. */ if (!(rd32(fbd, FBNIC_INTR_STATUS(0)) & (1u << FBNIC_FW_MSIX_ENTRY))) - return; + return false; wr32(fbd, FBNIC_INTR_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY); - for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++) - fbnic_mbx_postinit_desc_ring(fbd, i); + return true; } /** @@ -460,6 +518,7 @@ static const struct fbnic_tlv_index fbnic_fw_cap_resp_index[] = { FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_UEFI_VERSION), FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR, FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE), + FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_ANTI_ROLLBACK_VERSION), FBNIC_TLV_ATTR_LAST }; @@ -582,6 +641,9 @@ static int fbnic_fw_parse_cap_resp(void *opaque, struct fbnic_tlv_msg **results) if (results[FBNIC_FW_CAP_RESP_BMC_ALL_MULTI] || !bmc_present) fbd->fw_cap.all_multi = all_multi; + fbd->fw_cap.anti_rollback_version = + fta_get_uint(results, FBNIC_FW_CAP_RESP_ANTI_ROLLBACK_VERSION); + return 0; } @@ -704,6 +766,188 @@ void fbnic_fw_check_heartbeat(struct fbnic_dev *fbd) dev_warn(fbd->dev, "Failed to send heartbeat message\n"); } +int fbnic_fw_xmit_fw_start_upgrade(struct fbnic_dev *fbd, + struct fbnic_fw_completion *cmpl_data, + unsigned int id, unsigned int len) +{ + struct fbnic_tlv_msg *msg; + int err; + + if (!fbnic_fw_present(fbd)) + return -ENODEV; + + if (!len) + return -EINVAL; + + msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_FW_START_UPGRADE_REQ); + if (!msg) + return -ENOMEM; + + err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_START_UPGRADE_SECTION, id); + if (err) + goto free_message; + + err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_START_UPGRADE_IMAGE_LENGTH, + len); + if (err) + goto free_message; + + err = fbnic_mbx_map_req_w_cmpl(fbd, msg, cmpl_data); + if (err) + goto free_message; + + return 0; + +free_message: + free_page((unsigned long)msg); + return err; +} + +static const struct fbnic_tlv_index fbnic_fw_start_upgrade_resp_index[] = { + FBNIC_TLV_ATTR_S32(FBNIC_FW_START_UPGRADE_ERROR), + FBNIC_TLV_ATTR_LAST +}; + +static int fbnic_fw_parse_fw_start_upgrade_resp(void *opaque, + struct fbnic_tlv_msg **results) +{ + struct fbnic_fw_completion *cmpl_data; + struct fbnic_dev *fbd = opaque; + u32 msg_type; + s32 err; + + /* Verify we have a completion pointer */ + msg_type = FBNIC_TLV_MSG_ID_FW_START_UPGRADE_REQ; + cmpl_data = fbnic_fw_get_cmpl_by_type(fbd, msg_type); + if (!cmpl_data) + return -ENOSPC; + + /* Check for errors */ + err = fta_get_sint(results, FBNIC_FW_START_UPGRADE_ERROR); + + cmpl_data->result = err; + complete(&cmpl_data->done); + fbnic_fw_put_cmpl(cmpl_data); + + return 0; +} + +int fbnic_fw_xmit_fw_write_chunk(struct fbnic_dev *fbd, + const u8 *data, u32 offset, u16 length, + int cancel_error) +{ + struct fbnic_tlv_msg *msg; + int err; + + msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_RESP); + if (!msg) + return -ENOMEM; + + /* Report error to FW to cancel upgrade */ + if (cancel_error) { + err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_WRITE_CHUNK_ERROR, + cancel_error); + if (err) + goto 
free_message; + } + + if (data) { + err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_WRITE_CHUNK_OFFSET, + offset); + if (err) + goto free_message; + + err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_WRITE_CHUNK_LENGTH, + length); + if (err) + goto free_message; + + err = fbnic_tlv_attr_put_value(msg, FBNIC_FW_WRITE_CHUNK_DATA, + data + offset, length); + if (err) + goto free_message; + } + + err = fbnic_mbx_map_tlv_msg(fbd, msg); + if (err) + goto free_message; + + return 0; + +free_message: + free_page((unsigned long)msg); + return err; +} + +static const struct fbnic_tlv_index fbnic_fw_write_chunk_req_index[] = { + FBNIC_TLV_ATTR_U32(FBNIC_FW_WRITE_CHUNK_OFFSET), + FBNIC_TLV_ATTR_U32(FBNIC_FW_WRITE_CHUNK_LENGTH), + FBNIC_TLV_ATTR_LAST +}; + +static int fbnic_fw_parse_fw_write_chunk_req(void *opaque, + struct fbnic_tlv_msg **results) +{ + struct fbnic_fw_completion *cmpl_data; + struct fbnic_dev *fbd = opaque; + u32 msg_type; + u32 offset; + u32 length; + + /* Verify we have a completion pointer */ + msg_type = FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_REQ; + cmpl_data = fbnic_fw_get_cmpl_by_type(fbd, msg_type); + if (!cmpl_data) + return -ENOSPC; + + /* Pull length/offset pair and mark it as complete */ + offset = fta_get_uint(results, FBNIC_FW_WRITE_CHUNK_OFFSET); + length = fta_get_uint(results, FBNIC_FW_WRITE_CHUNK_LENGTH); + cmpl_data->u.fw_update.offset = offset; + cmpl_data->u.fw_update.length = length; + + complete(&cmpl_data->done); + fbnic_fw_put_cmpl(cmpl_data); + + return 0; +} + +static const struct fbnic_tlv_index fbnic_fw_finish_upgrade_req_index[] = { + FBNIC_TLV_ATTR_S32(FBNIC_FW_FINISH_UPGRADE_ERROR), + FBNIC_TLV_ATTR_LAST +}; + +static int fbnic_fw_parse_fw_finish_upgrade_req(void *opaque, + struct fbnic_tlv_msg **results) +{ + struct fbnic_fw_completion *cmpl_data; + struct fbnic_dev *fbd = opaque; + u32 msg_type; + s32 err; + + /* Verify we have a completion pointer */ + msg_type = FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_REQ; + cmpl_data = fbnic_fw_get_cmpl_by_type(fbd, msg_type); + if (!cmpl_data) + return -ENOSPC; + + /* Check for errors */ + err = fta_get_sint(results, FBNIC_FW_FINISH_UPGRADE_ERROR); + + /* Close out update by incrementing offset by length which should + * match the total size of the component. Set length to 0 since no + * new chunks will be requested. 
+ */ + cmpl_data->u.fw_update.offset += cmpl_data->u.fw_update.length; + cmpl_data->u.fw_update.length = 0; + + cmpl_data->result = err; + complete(&cmpl_data->done); + fbnic_fw_put_cmpl(cmpl_data); + + return 0; +} + /** * fbnic_fw_xmit_tsene_read_msg - Create and transmit a sensor read request * @fbd: FBNIC device structure @@ -788,6 +1032,15 @@ static const struct fbnic_tlv_parser fbnic_fw_tlv_parser[] = { fbnic_fw_parse_ownership_resp), FBNIC_TLV_PARSER(HEARTBEAT_RESP, fbnic_heartbeat_resp_index, fbnic_fw_parse_heartbeat_resp), + FBNIC_TLV_PARSER(FW_START_UPGRADE_RESP, + fbnic_fw_start_upgrade_resp_index, + fbnic_fw_parse_fw_start_upgrade_resp), + FBNIC_TLV_PARSER(FW_WRITE_CHUNK_REQ, + fbnic_fw_write_chunk_req_index, + fbnic_fw_parse_fw_write_chunk_req), + FBNIC_TLV_PARSER(FW_FINISH_UPGRADE_REQ, + fbnic_fw_finish_upgrade_req_index, + fbnic_fw_parse_fw_finish_upgrade_req), FBNIC_TLV_PARSER(TSENE_READ_RESP, fbnic_tsene_read_resp_index, fbnic_fw_parse_tsene_read_resp), @@ -859,7 +1112,7 @@ next_page: void fbnic_mbx_poll(struct fbnic_dev *fbd) { - fbnic_mbx_postinit(fbd); + fbnic_mbx_event(fbd); fbnic_mbx_process_tx_msgs(fbd); fbnic_mbx_process_rx_msgs(fbd); @@ -867,60 +1120,103 @@ void fbnic_mbx_poll(struct fbnic_dev *fbd) int fbnic_mbx_poll_tx_ready(struct fbnic_dev *fbd) { - struct fbnic_fw_mbx *tx_mbx; - int attempts = 50; + unsigned long timeout = jiffies + 10 * HZ + 1; + int err, i; - /* Immediate fail if BAR4 isn't there */ - if (!fbnic_fw_present(fbd)) - return -ENODEV; + do { + if (!time_is_after_jiffies(timeout)) + return -ETIMEDOUT; - tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX]; - while (!tx_mbx->ready && --attempts) { /* Force the firmware to trigger an interrupt response to * avoid the mailbox getting stuck closed if the interrupt * is reset. */ - fbnic_mbx_init_desc_ring(fbd, FBNIC_IPC_MBX_TX_IDX); + fbnic_mbx_reset_desc_ring(fbd, FBNIC_IPC_MBX_TX_IDX); - msleep(200); + /* Immediate fail if BAR4 went away */ + if (!fbnic_fw_present(fbd)) + return -ENODEV; + + msleep(20); + } while (!fbnic_mbx_event(fbd)); - fbnic_mbx_poll(fbd); + /* FW has shown signs of life. Enable DMA and start Tx/Rx */ + for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++) + fbnic_mbx_init_desc_ring(fbd, i); + + /* Request an update from the firmware. This should overwrite + * mgmt.version once we get the actual version from the firmware + * in the capabilities request message. + */ + err = fbnic_fw_xmit_simple_msg(fbd, FBNIC_TLV_MSG_ID_HOST_CAP_REQ); + if (err) + goto clean_mbx; + + /* Use "1" to indicate we entered the state waiting for a response */ + fbd->fw_cap.running.mgmt.version = 1; + + return 0; +clean_mbx: + /* Cleanup Rx buffers and disable mailbox */ + fbnic_mbx_clean(fbd); + return err; +} + +static void __fbnic_fw_evict_cmpl(struct fbnic_fw_completion *cmpl_data) +{ + cmpl_data->result = -EPIPE; + complete(&cmpl_data->done); +} + +static void fbnic_mbx_evict_all_cmpl(struct fbnic_dev *fbd) +{ + int i; + + for (i = 0; i < FBNIC_MBX_CMPL_SLOTS; i++) { + struct fbnic_fw_completion *cmpl_data = fbd->cmpl_data[i]; + + if (cmpl_data) + __fbnic_fw_evict_cmpl(cmpl_data); } - return attempts ? 
0 : -ETIMEDOUT; + memset(fbd->cmpl_data, 0, sizeof(fbd->cmpl_data)); } void fbnic_mbx_flush_tx(struct fbnic_dev *fbd) { + unsigned long timeout = jiffies + 10 * HZ + 1; struct fbnic_fw_mbx *tx_mbx; - int attempts = 50; - u8 count = 0; - - /* Nothing to do if there is no mailbox */ - if (!fbnic_fw_present(fbd)) - return; + u8 tail; /* Record current Rx stats */ tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX]; - /* Nothing to do if mailbox never got to ready */ - if (!tx_mbx->ready) - return; + spin_lock_irq(&fbd->fw_tx_lock); + + /* Clear ready to prevent any further attempts to transmit */ + tx_mbx->ready = false; + + /* Read tail to determine the last tail state for the ring */ + tail = tx_mbx->tail; + + /* Flush any completions as we are no longer processing Rx */ + fbnic_mbx_evict_all_cmpl(fbd); + + spin_unlock_irq(&fbd->fw_tx_lock); /* Give firmware time to process packet, - * we will wait up to 10 seconds which is 50 waits of 200ms. + * we will wait up to 10 seconds which is 500 waits of 20ms. */ do { u8 head = tx_mbx->head; - if (head == tx_mbx->tail) + /* Tx ring is empty once head == tail */ + if (head == tail) break; - msleep(200); + msleep(20); fbnic_mbx_process_tx_msgs(fbd); - - count += (tx_mbx->head - head) % FBNIC_IPC_MBX_DESC_LEN; - } while (count < FBNIC_IPC_MBX_DESC_LEN && --attempts); + } while (time_is_after_jiffies(timeout)); } void fbnic_get_fw_ver_commit_str(struct fbnic_dev *fbd, char *fw_version, @@ -936,20 +1232,28 @@ void fbnic_get_fw_ver_commit_str(struct fbnic_dev *fbd, char *fw_version, fw_version, str_sz); } -void fbnic_fw_init_cmpl(struct fbnic_fw_completion *fw_cmpl, - u32 msg_type) +struct fbnic_fw_completion *fbnic_fw_alloc_cmpl(u32 msg_type) { - fw_cmpl->msg_type = msg_type; - init_completion(&fw_cmpl->done); - kref_init(&fw_cmpl->ref_count); + struct fbnic_fw_completion *cmpl; + + cmpl = kzalloc(sizeof(*cmpl), GFP_KERNEL); + if (!cmpl) + return NULL; + + cmpl->msg_type = msg_type; + init_completion(&cmpl->done); + kref_init(&cmpl->ref_count); + + return cmpl; } -void fbnic_fw_clear_compl(struct fbnic_dev *fbd) +void fbnic_fw_clear_cmpl(struct fbnic_dev *fbd, + struct fbnic_fw_completion *fw_cmpl) { unsigned long flags; spin_lock_irqsave(&fbd->fw_tx_lock, flags); - fbd->cmpl_data = NULL; + fbnic_mbx_clear_cmpl_slot(fbd, fw_cmpl); spin_unlock_irqrestore(&fbd->fw_tx_lock, flags); } diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h index a3618e7826c2..08bc4b918de7 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h +++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h @@ -42,6 +42,7 @@ struct fbnic_fw_cap { u8 all_multi : 1; u8 link_speed; u8 link_fec; + u32 anti_rollback_version; }; struct fbnic_fw_completion { @@ -51,6 +52,10 @@ struct fbnic_fw_completion { int result; union { struct { + u32 offset; + u32 length; + } fw_update; + struct { s32 millivolts; s32 millidegrees; } tsene; @@ -59,17 +64,25 @@ struct fbnic_fw_completion { void fbnic_mbx_init(struct fbnic_dev *fbd); void fbnic_mbx_clean(struct fbnic_dev *fbd); +int fbnic_mbx_set_cmpl(struct fbnic_dev *fbd, + struct fbnic_fw_completion *cmpl_data); void fbnic_mbx_poll(struct fbnic_dev *fbd); int fbnic_mbx_poll_tx_ready(struct fbnic_dev *fbd); void fbnic_mbx_flush_tx(struct fbnic_dev *fbd); int fbnic_fw_xmit_ownership_msg(struct fbnic_dev *fbd, bool take_ownership); int fbnic_fw_init_heartbeat(struct fbnic_dev *fbd, bool poll); void fbnic_fw_check_heartbeat(struct fbnic_dev *fbd); +int fbnic_fw_xmit_fw_start_upgrade(struct fbnic_dev *fbd, + struct 
fbnic_fw_completion *cmpl_data, + unsigned int id, unsigned int len); +int fbnic_fw_xmit_fw_write_chunk(struct fbnic_dev *fbd, + const u8 *data, u32 offset, u16 length, + int cancel_error); int fbnic_fw_xmit_tsene_read_msg(struct fbnic_dev *fbd, struct fbnic_fw_completion *cmpl_data); -void fbnic_fw_init_cmpl(struct fbnic_fw_completion *cmpl_data, - u32 msg_type); -void fbnic_fw_clear_compl(struct fbnic_dev *fbd); +struct fbnic_fw_completion *fbnic_fw_alloc_cmpl(u32 msg_type); +void fbnic_fw_clear_cmpl(struct fbnic_dev *fbd, + struct fbnic_fw_completion *cmpl_data); void fbnic_fw_put_cmpl(struct fbnic_fw_completion *cmpl_data); #define fbnic_mk_full_fw_ver_str(_rev_id, _delim, _commit, _str, _str_sz) \ @@ -86,6 +99,15 @@ do { \ #define fbnic_mk_fw_ver_str(_rev_id, _str) \ fbnic_mk_full_fw_ver_str(_rev_id, "", "", _str, sizeof(_str)) +enum { + QSPI_SECTION_CMRT = 0, + QSPI_SECTION_CONTROL_FW = 1, + QSPI_SECTION_UCODE = 2, + QSPI_SECTION_OPTION_ROM = 3, + QSPI_SECTION_USER = 4, + QSPI_SECTION_INVALID, +}; + #define FW_HEARTBEAT_PERIOD (10 * HZ) enum { @@ -95,6 +117,12 @@ enum { FBNIC_TLV_MSG_ID_OWNERSHIP_RESP = 0x13, FBNIC_TLV_MSG_ID_HEARTBEAT_REQ = 0x14, FBNIC_TLV_MSG_ID_HEARTBEAT_RESP = 0x15, + FBNIC_TLV_MSG_ID_FW_START_UPGRADE_REQ = 0x22, + FBNIC_TLV_MSG_ID_FW_START_UPGRADE_RESP = 0x23, + FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_REQ = 0x24, + FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_RESP = 0x25, + FBNIC_TLV_MSG_ID_FW_FINISH_UPGRADE_REQ = 0x28, + FBNIC_TLV_MSG_ID_FW_FINISH_UPGRADE_RESP = 0x29, FBNIC_TLV_MSG_ID_TSENE_READ_REQ = 0x3C, FBNIC_TLV_MSG_ID_TSENE_READ_RESP = 0x3D, }; @@ -122,6 +150,7 @@ enum { FBNIC_FW_CAP_RESP_STORED_CMRT_COMMIT_STR = 0x10, FBNIC_FW_CAP_RESP_UEFI_VERSION = 0x11, FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR = 0x12, + FBNIC_FW_CAP_RESP_ANTI_ROLLBACK_VERSION = 0x15, FBNIC_FW_CAP_RESP_MSG_MAX }; @@ -149,4 +178,25 @@ enum { FBNIC_FW_OWNERSHIP_FLAG = 0x0, FBNIC_FW_OWNERSHIP_MSG_MAX }; + +enum { + FBNIC_FW_START_UPGRADE_ERROR = 0x0, + FBNIC_FW_START_UPGRADE_SECTION = 0x1, + FBNIC_FW_START_UPGRADE_IMAGE_LENGTH = 0x2, + FBNIC_FW_START_UPGRADE_MSG_MAX +}; + +enum { + FBNIC_FW_WRITE_CHUNK_OFFSET = 0x0, + FBNIC_FW_WRITE_CHUNK_LENGTH = 0x1, + FBNIC_FW_WRITE_CHUNK_DATA = 0x2, + FBNIC_FW_WRITE_CHUNK_ERROR = 0x3, + FBNIC_FW_WRITE_CHUNK_MSG_MAX +}; + +enum { + FBNIC_FW_FINISH_UPGRADE_ERROR = 0x0, + FBNIC_FW_FINISH_UPGRADE_MSG_MAX +}; + #endif /* _FBNIC_FW_H_ */ diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c b/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c index 89ac6bc8c7fc..4223d8100e64 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c @@ -70,6 +70,100 @@ static void fbnic_hw_stat_rd64(struct fbnic_dev *fbd, u32 reg, s32 offset, stat->u.old_reg_value_64 = new_reg_value; } +static void fbnic_reset_tmi_stats(struct fbnic_dev *fbd, + struct fbnic_tmi_stats *tmi) +{ + fbnic_hw_stat_rst32(fbd, FBNIC_TMI_DROP_PKTS, &tmi->drop.frames); + fbnic_hw_stat_rst64(fbd, FBNIC_TMI_DROP_BYTE_L, 1, &tmi->drop.bytes); + + fbnic_hw_stat_rst32(fbd, + FBNIC_TMI_ILLEGAL_PTP_REQS, + &tmi->ptp_illegal_req); + fbnic_hw_stat_rst32(fbd, FBNIC_TMI_GOOD_PTP_TS, &tmi->ptp_good_ts); + fbnic_hw_stat_rst32(fbd, FBNIC_TMI_BAD_PTP_TS, &tmi->ptp_bad_ts); +} + +static void fbnic_get_tmi_stats32(struct fbnic_dev *fbd, + struct fbnic_tmi_stats *tmi) +{ + fbnic_hw_stat_rd32(fbd, FBNIC_TMI_DROP_PKTS, &tmi->drop.frames); + + fbnic_hw_stat_rd32(fbd, + FBNIC_TMI_ILLEGAL_PTP_REQS, + &tmi->ptp_illegal_req); + fbnic_hw_stat_rd32(fbd, FBNIC_TMI_GOOD_PTP_TS, 
&tmi->ptp_good_ts); + fbnic_hw_stat_rd32(fbd, FBNIC_TMI_BAD_PTP_TS, &tmi->ptp_bad_ts); +} + +static void fbnic_get_tmi_stats(struct fbnic_dev *fbd, + struct fbnic_tmi_stats *tmi) +{ + fbnic_hw_stat_rd64(fbd, FBNIC_TMI_DROP_BYTE_L, 1, &tmi->drop.bytes); +} + +static void fbnic_reset_tti_stats(struct fbnic_dev *fbd, + struct fbnic_tti_stats *tti) +{ + fbnic_hw_stat_rst32(fbd, + FBNIC_TCE_TTI_CM_DROP_PKTS, + &tti->cm_drop.frames); + fbnic_hw_stat_rst64(fbd, + FBNIC_TCE_TTI_CM_DROP_BYTE_L, + 1, + &tti->cm_drop.bytes); + + fbnic_hw_stat_rst32(fbd, + FBNIC_TCE_TTI_FRAME_DROP_PKTS, + &tti->frame_drop.frames); + fbnic_hw_stat_rst64(fbd, + FBNIC_TCE_TTI_FRAME_DROP_BYTE_L, + 1, + &tti->frame_drop.bytes); + + fbnic_hw_stat_rst32(fbd, + FBNIC_TCE_TBI_DROP_PKTS, + &tti->tbi_drop.frames); + fbnic_hw_stat_rst64(fbd, + FBNIC_TCE_TBI_DROP_BYTE_L, + 1, + &tti->tbi_drop.bytes); +} + +static void fbnic_get_tti_stats32(struct fbnic_dev *fbd, + struct fbnic_tti_stats *tti) +{ + fbnic_hw_stat_rd32(fbd, + FBNIC_TCE_TTI_CM_DROP_PKTS, + &tti->cm_drop.frames); + + fbnic_hw_stat_rd32(fbd, + FBNIC_TCE_TTI_FRAME_DROP_PKTS, + &tti->frame_drop.frames); + + fbnic_hw_stat_rd32(fbd, + FBNIC_TCE_TBI_DROP_PKTS, + &tti->tbi_drop.frames); +} + +static void fbnic_get_tti_stats(struct fbnic_dev *fbd, + struct fbnic_tti_stats *tti) +{ + fbnic_hw_stat_rd64(fbd, + FBNIC_TCE_TTI_CM_DROP_BYTE_L, + 1, + &tti->cm_drop.bytes); + + fbnic_hw_stat_rd64(fbd, + FBNIC_TCE_TTI_FRAME_DROP_BYTE_L, + 1, + &tti->frame_drop.bytes); + + fbnic_hw_stat_rd64(fbd, + FBNIC_TCE_TBI_DROP_BYTE_L, + 1, + &tti->tbi_drop.bytes); +} + static void fbnic_reset_rpc_stats(struct fbnic_dev *fbd, struct fbnic_rpc_stats *rpc) { @@ -117,6 +211,221 @@ static void fbnic_get_rpc_stats32(struct fbnic_dev *fbd, &rpc->ovr_size_err); } +static void fbnic_reset_rxb_fifo_stats(struct fbnic_dev *fbd, int i, + struct fbnic_rxb_fifo_stats *fifo) +{ + fbnic_hw_stat_rst32(fbd, FBNIC_RXB_DROP_FRMS_STS(i), + &fifo->drop.frames); + fbnic_hw_stat_rst64(fbd, FBNIC_RXB_DROP_BYTES_STS_L(i), 1, + &fifo->drop.bytes); + + fbnic_hw_stat_rst32(fbd, FBNIC_RXB_TRUN_FRMS_STS(i), + &fifo->trunc.frames); + fbnic_hw_stat_rst64(fbd, FBNIC_RXB_TRUN_BYTES_STS_L(i), 1, + &fifo->trunc.bytes); + + fbnic_hw_stat_rst32(fbd, FBNIC_RXB_TRANS_DROP_STS(i), + &fifo->trans_drop); + fbnic_hw_stat_rst32(fbd, FBNIC_RXB_TRANS_ECN_STS(i), + &fifo->trans_ecn); + + fifo->level.u.old_reg_value_32 = 0; +} + +static void fbnic_reset_rxb_enq_stats(struct fbnic_dev *fbd, int i, + struct fbnic_rxb_enqueue_stats *enq) +{ + fbnic_hw_stat_rst32(fbd, FBNIC_RXB_DRBO_FRM_CNT_SRC(i), + &enq->drbo.frames); + fbnic_hw_stat_rst64(fbd, FBNIC_RXB_DRBO_BYTE_CNT_SRC_L(i), 4, + &enq->drbo.bytes); + + fbnic_hw_stat_rst32(fbd, FBNIC_RXB_INTEGRITY_ERR(i), + &enq->integrity_err); + fbnic_hw_stat_rst32(fbd, FBNIC_RXB_MAC_ERR(i), + &enq->mac_err); + fbnic_hw_stat_rst32(fbd, FBNIC_RXB_PARSER_ERR(i), + &enq->parser_err); + fbnic_hw_stat_rst32(fbd, FBNIC_RXB_FRM_ERR(i), + &enq->frm_err); +} + +static void fbnic_reset_rxb_deq_stats(struct fbnic_dev *fbd, int i, + struct fbnic_rxb_dequeue_stats *deq) +{ + fbnic_hw_stat_rst32(fbd, FBNIC_RXB_INTF_FRM_CNT_DST(i), + &deq->intf.frames); + fbnic_hw_stat_rst64(fbd, FBNIC_RXB_INTF_BYTE_CNT_DST_L(i), 4, + &deq->intf.bytes); + + fbnic_hw_stat_rst32(fbd, FBNIC_RXB_PBUF_FRM_CNT_DST(i), + &deq->pbuf.frames); + fbnic_hw_stat_rst64(fbd, FBNIC_RXB_PBUF_BYTE_CNT_DST_L(i), 4, + &deq->pbuf.bytes); +} + +static void fbnic_reset_rxb_stats(struct fbnic_dev *fbd, + struct fbnic_rxb_stats *rxb) +{ + int i; + + for (i = 0; i < 
FBNIC_RXB_FIFO_INDICES; i++) + fbnic_reset_rxb_fifo_stats(fbd, i, &rxb->fifo[i]); + + for (i = 0; i < FBNIC_RXB_INTF_INDICES; i++) { + fbnic_reset_rxb_enq_stats(fbd, i, &rxb->enq[i]); + fbnic_reset_rxb_deq_stats(fbd, i, &rxb->deq[i]); + } +} + +static void fbnic_get_rxb_fifo_stats32(struct fbnic_dev *fbd, int i, + struct fbnic_rxb_fifo_stats *fifo) +{ + fbnic_hw_stat_rd32(fbd, FBNIC_RXB_DROP_FRMS_STS(i), + &fifo->drop.frames); + fbnic_hw_stat_rd32(fbd, FBNIC_RXB_TRUN_FRMS_STS(i), + &fifo->trunc.frames); + + fbnic_hw_stat_rd32(fbd, FBNIC_RXB_TRANS_DROP_STS(i), + &fifo->trans_drop); + fbnic_hw_stat_rd32(fbd, FBNIC_RXB_TRANS_ECN_STS(i), + &fifo->trans_ecn); + + fifo->level.value = rd32(fbd, FBNIC_RXB_PBUF_FIFO_LEVEL(i)); +} + +static void fbnic_get_rxb_fifo_stats(struct fbnic_dev *fbd, int i, + struct fbnic_rxb_fifo_stats *fifo) +{ + fbnic_hw_stat_rd64(fbd, FBNIC_RXB_DROP_BYTES_STS_L(i), 1, + &fifo->drop.bytes); + fbnic_hw_stat_rd64(fbd, FBNIC_RXB_TRUN_BYTES_STS_L(i), 1, + &fifo->trunc.bytes); + + fbnic_get_rxb_fifo_stats32(fbd, i, fifo); +} + +static void fbnic_get_rxb_enq_stats32(struct fbnic_dev *fbd, int i, + struct fbnic_rxb_enqueue_stats *enq) +{ + fbnic_hw_stat_rd32(fbd, FBNIC_RXB_DRBO_FRM_CNT_SRC(i), + &enq->drbo.frames); + + fbnic_hw_stat_rd32(fbd, FBNIC_RXB_INTEGRITY_ERR(i), + &enq->integrity_err); + fbnic_hw_stat_rd32(fbd, FBNIC_RXB_MAC_ERR(i), + &enq->mac_err); + fbnic_hw_stat_rd32(fbd, FBNIC_RXB_PARSER_ERR(i), + &enq->parser_err); + fbnic_hw_stat_rd32(fbd, FBNIC_RXB_FRM_ERR(i), + &enq->frm_err); +} + +static void fbnic_get_rxb_enq_stats(struct fbnic_dev *fbd, int i, + struct fbnic_rxb_enqueue_stats *enq) +{ + fbnic_hw_stat_rd64(fbd, FBNIC_RXB_DRBO_BYTE_CNT_SRC_L(i), 4, + &enq->drbo.bytes); + + fbnic_get_rxb_enq_stats32(fbd, i, enq); +} + +static void fbnic_get_rxb_deq_stats32(struct fbnic_dev *fbd, int i, + struct fbnic_rxb_dequeue_stats *deq) +{ + fbnic_hw_stat_rd32(fbd, FBNIC_RXB_INTF_FRM_CNT_DST(i), + &deq->intf.frames); + fbnic_hw_stat_rd32(fbd, FBNIC_RXB_PBUF_FRM_CNT_DST(i), + &deq->pbuf.frames); +} + +static void fbnic_get_rxb_deq_stats(struct fbnic_dev *fbd, int i, + struct fbnic_rxb_dequeue_stats *deq) +{ + fbnic_hw_stat_rd64(fbd, FBNIC_RXB_INTF_BYTE_CNT_DST_L(i), 4, + &deq->intf.bytes); + fbnic_hw_stat_rd64(fbd, FBNIC_RXB_PBUF_BYTE_CNT_DST_L(i), 4, + &deq->pbuf.bytes); + + fbnic_get_rxb_deq_stats32(fbd, i, deq); +} + +static void fbnic_get_rxb_stats32(struct fbnic_dev *fbd, + struct fbnic_rxb_stats *rxb) +{ + int i; + + for (i = 0; i < FBNIC_RXB_FIFO_INDICES; i++) + fbnic_get_rxb_fifo_stats32(fbd, i, &rxb->fifo[i]); + + for (i = 0; i < FBNIC_RXB_INTF_INDICES; i++) { + fbnic_get_rxb_enq_stats32(fbd, i, &rxb->enq[i]); + fbnic_get_rxb_deq_stats32(fbd, i, &rxb->deq[i]); + } +} + +static void fbnic_get_rxb_stats(struct fbnic_dev *fbd, + struct fbnic_rxb_stats *rxb) +{ + int i; + + for (i = 0; i < FBNIC_RXB_FIFO_INDICES; i++) + fbnic_get_rxb_fifo_stats(fbd, i, &rxb->fifo[i]); + + for (i = 0; i < FBNIC_RXB_INTF_INDICES; i++) { + fbnic_get_rxb_enq_stats(fbd, i, &rxb->enq[i]); + fbnic_get_rxb_deq_stats(fbd, i, &rxb->deq[i]); + } +} + +static void fbnic_reset_hw_rxq_stats(struct fbnic_dev *fbd, + struct fbnic_hw_q_stats *hw_q) +{ + int i; + + for (i = 0; i < fbd->max_num_queues; i++, hw_q++) { + u32 base = FBNIC_QUEUE(i); + + fbnic_hw_stat_rst32(fbd, + base + FBNIC_QUEUE_RDE_PKT_ERR_CNT, + &hw_q->rde_pkt_err); + fbnic_hw_stat_rst32(fbd, + base + FBNIC_QUEUE_RDE_CQ_DROP_CNT, + &hw_q->rde_pkt_cq_drop); + fbnic_hw_stat_rst32(fbd, + base + FBNIC_QUEUE_RDE_BDQ_DROP_CNT, + 
&hw_q->rde_pkt_bdq_drop); + } +} + +static void fbnic_get_hw_rxq_stats32(struct fbnic_dev *fbd, + struct fbnic_hw_q_stats *hw_q) +{ + int i; + + for (i = 0; i < fbd->max_num_queues; i++, hw_q++) { + u32 base = FBNIC_QUEUE(i); + + fbnic_hw_stat_rd32(fbd, + base + FBNIC_QUEUE_RDE_PKT_ERR_CNT, + &hw_q->rde_pkt_err); + fbnic_hw_stat_rd32(fbd, + base + FBNIC_QUEUE_RDE_CQ_DROP_CNT, + &hw_q->rde_pkt_cq_drop); + fbnic_hw_stat_rd32(fbd, + base + FBNIC_QUEUE_RDE_BDQ_DROP_CNT, + &hw_q->rde_pkt_bdq_drop); + } +} + +void fbnic_get_hw_q_stats(struct fbnic_dev *fbd, + struct fbnic_hw_q_stats *hw_q) +{ + spin_lock(&fbd->hw_stats_lock); + fbnic_get_hw_rxq_stats32(fbd, hw_q); + spin_unlock(&fbd->hw_stats_lock); +} + static void fbnic_reset_pcie_stats_asic(struct fbnic_dev *fbd, struct fbnic_pcie_stats *pcie) { @@ -203,18 +512,40 @@ static void fbnic_get_pcie_stats_asic64(struct fbnic_dev *fbd, void fbnic_reset_hw_stats(struct fbnic_dev *fbd) { + spin_lock(&fbd->hw_stats_lock); + fbnic_reset_tmi_stats(fbd, &fbd->hw_stats.tmi); + fbnic_reset_tti_stats(fbd, &fbd->hw_stats.tti); fbnic_reset_rpc_stats(fbd, &fbd->hw_stats.rpc); + fbnic_reset_rxb_stats(fbd, &fbd->hw_stats.rxb); + fbnic_reset_hw_rxq_stats(fbd, fbd->hw_stats.hw_q); fbnic_reset_pcie_stats_asic(fbd, &fbd->hw_stats.pcie); + spin_unlock(&fbd->hw_stats_lock); } -void fbnic_get_hw_stats32(struct fbnic_dev *fbd) +static void __fbnic_get_hw_stats32(struct fbnic_dev *fbd) { + fbnic_get_tmi_stats32(fbd, &fbd->hw_stats.tmi); + fbnic_get_tti_stats32(fbd, &fbd->hw_stats.tti); fbnic_get_rpc_stats32(fbd, &fbd->hw_stats.rpc); + fbnic_get_rxb_stats32(fbd, &fbd->hw_stats.rxb); + fbnic_get_hw_rxq_stats32(fbd, fbd->hw_stats.hw_q); +} + +void fbnic_get_hw_stats32(struct fbnic_dev *fbd) +{ + spin_lock(&fbd->hw_stats_lock); + __fbnic_get_hw_stats32(fbd); + spin_unlock(&fbd->hw_stats_lock); } void fbnic_get_hw_stats(struct fbnic_dev *fbd) { - fbnic_get_hw_stats32(fbd); + spin_lock(&fbd->hw_stats_lock); + __fbnic_get_hw_stats32(fbd); + fbnic_get_tmi_stats(fbd, &fbd->hw_stats.tmi); + fbnic_get_tti_stats(fbd, &fbd->hw_stats.tti); + fbnic_get_rxb_stats(fbd, &fbd->hw_stats.rxb); fbnic_get_pcie_stats_asic64(fbd, &fbd->hw_stats.pcie); + spin_unlock(&fbd->hw_stats_lock); } diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h b/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h index 78df56b87745..07e54bb75bf3 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h +++ b/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h @@ -17,6 +17,11 @@ struct fbnic_stat_counter { bool reported; }; +struct fbnic_hw_stat { + struct fbnic_stat_counter frames; + struct fbnic_stat_counter bytes; +}; + struct fbnic_eth_mac_stats { struct fbnic_stat_counter FramesTransmittedOK; struct fbnic_stat_counter FramesReceivedOK; @@ -37,12 +42,49 @@ struct fbnic_mac_stats { struct fbnic_eth_mac_stats eth_mac; }; +struct fbnic_tmi_stats { + struct fbnic_hw_stat drop; + struct fbnic_stat_counter ptp_illegal_req, ptp_good_ts, ptp_bad_ts; +}; + +struct fbnic_tti_stats { + struct fbnic_hw_stat cm_drop, frame_drop, tbi_drop; +}; + struct fbnic_rpc_stats { struct fbnic_stat_counter unkn_etype, unkn_ext_hdr; struct fbnic_stat_counter ipv4_frag, ipv6_frag, ipv4_esp, ipv6_esp; struct fbnic_stat_counter tcp_opt_err, out_of_hdr_err, ovr_size_err; }; +struct fbnic_rxb_enqueue_stats { + struct fbnic_hw_stat drbo; + struct fbnic_stat_counter integrity_err, mac_err; + struct fbnic_stat_counter parser_err, frm_err; +}; + +struct fbnic_rxb_fifo_stats { + struct fbnic_hw_stat drop, trunc; + struct fbnic_stat_counter 
trans_drop, trans_ecn; + struct fbnic_stat_counter level; +}; + +struct fbnic_rxb_dequeue_stats { + struct fbnic_hw_stat intf, pbuf; +}; + +struct fbnic_rxb_stats { + struct fbnic_rxb_enqueue_stats enq[FBNIC_RXB_ENQUEUE_INDICES]; + struct fbnic_rxb_fifo_stats fifo[FBNIC_RXB_FIFO_INDICES]; + struct fbnic_rxb_dequeue_stats deq[FBNIC_RXB_DEQUEUE_INDICES]; +}; + +struct fbnic_hw_q_stats { + struct fbnic_stat_counter rde_pkt_err; + struct fbnic_stat_counter rde_pkt_cq_drop; + struct fbnic_stat_counter rde_pkt_bdq_drop; +}; + struct fbnic_pcie_stats { struct fbnic_stat_counter ob_rd_tlp, ob_rd_dword; struct fbnic_stat_counter ob_wr_tlp, ob_wr_dword; @@ -55,13 +97,19 @@ struct fbnic_pcie_stats { struct fbnic_hw_stats { struct fbnic_mac_stats mac; + struct fbnic_tmi_stats tmi; + struct fbnic_tti_stats tti; struct fbnic_rpc_stats rpc; + struct fbnic_rxb_stats rxb; + struct fbnic_hw_q_stats hw_q[FBNIC_MAX_QUEUES]; struct fbnic_pcie_stats pcie; }; u64 fbnic_stat_rd64(struct fbnic_dev *fbd, u32 reg, u32 offset); void fbnic_reset_hw_stats(struct fbnic_dev *fbd); +void fbnic_get_hw_q_stats(struct fbnic_dev *fbd, + struct fbnic_hw_q_stats *hw_q); void fbnic_get_hw_stats32(struct fbnic_dev *fbd); void fbnic_get_hw_stats(struct fbnic_dev *fbd); diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_irq.c b/drivers/net/ethernet/meta/fbnic/fbnic_irq.c index 1bbc0e56f3a0..1c88a2bf3a7a 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_irq.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_irq.c @@ -19,69 +19,105 @@ static irqreturn_t fbnic_fw_msix_intr(int __always_unused irq, void *data) return IRQ_HANDLED; } +static int __fbnic_fw_enable_mbx(struct fbnic_dev *fbd, int vector) +{ + int err; + + /* Initialize mailbox and attempt to poll it into ready state */ + fbnic_mbx_init(fbd); + err = fbnic_mbx_poll_tx_ready(fbd); + if (err) { + dev_warn(fbd->dev, "FW mailbox did not enter ready state\n"); + return err; + } + + /* Enable interrupt and unmask the vector */ + enable_irq(vector); + fbnic_wr32(fbd, FBNIC_INTR_MASK_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY); + + return 0; +} + /** - * fbnic_fw_enable_mbx - Configure and initialize Firmware Mailbox + * fbnic_fw_request_mbx - Configure and initialize Firmware Mailbox * @fbd: Pointer to device to initialize * - * This function will initialize the firmware mailbox rings, enable the IRQ - * and initialize the communication between the Firmware and the host. The - * firmware is expected to respond to the initialization by sending an - * interrupt essentially notifying the host that it has seen the - * initialization and is now synced up. + * This function will allocate the IRQ and then reinitialize the mailbox + * starting communication between the host and firmware. * * Return: non-zero on failure. **/ -int fbnic_fw_enable_mbx(struct fbnic_dev *fbd) +int fbnic_fw_request_mbx(struct fbnic_dev *fbd) { - u32 vector = fbd->fw_msix_vector; - int err; + struct pci_dev *pdev = to_pci_dev(fbd->dev); + int vector, err; + + WARN_ON(fbd->fw_msix_vector); + + vector = pci_irq_vector(pdev, FBNIC_FW_MSIX_ENTRY); + if (vector < 0) + return vector; /* Request the IRQ for FW Mailbox vector. 
*/ err = request_threaded_irq(vector, NULL, &fbnic_fw_msix_intr, - IRQF_ONESHOT, dev_name(fbd->dev), fbd); + IRQF_ONESHOT | IRQF_NO_AUTOEN, + dev_name(fbd->dev), fbd); if (err) return err; /* Initialize mailbox and attempt to poll it into ready state */ - fbnic_mbx_init(fbd); - err = fbnic_mbx_poll_tx_ready(fbd); - if (err) { - dev_warn(fbd->dev, "FW mailbox did not enter ready state\n"); + err = __fbnic_fw_enable_mbx(fbd, vector); + if (err) free_irq(vector, fbd); - return err; - } - /* Enable interrupts */ - fbnic_wr32(fbd, FBNIC_INTR_MASK_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY); + fbd->fw_msix_vector = vector; - return 0; + return err; } /** - * fbnic_fw_disable_mbx - Disable mailbox and place it in standby state - * @fbd: Pointer to device to disable + * fbnic_fw_disable_mbx - Temporarily place mailbox in standby state + * @fbd: Pointer to device * - * This function will disable the mailbox interrupt, free any messages still - * in the mailbox and place it into a standby state. The firmware is - * expected to see the update and assume that the host is in the reset state. + * Shutdown the mailbox by notifying the firmware to stop sending us logs, mask + * and synchronize the IRQ, and then clean up the rings. **/ -void fbnic_fw_disable_mbx(struct fbnic_dev *fbd) +static void fbnic_fw_disable_mbx(struct fbnic_dev *fbd) { - /* Disable interrupt and free vector */ - fbnic_wr32(fbd, FBNIC_INTR_MASK_SET(0), 1u << FBNIC_FW_MSIX_ENTRY); + /* Disable interrupt and synchronize the IRQ */ + disable_irq(fbd->fw_msix_vector); - /* Free the vector */ - free_irq(fbd->fw_msix_vector, fbd); + /* Mask the vector */ + fbnic_wr32(fbd, FBNIC_INTR_MASK_SET(0), 1u << FBNIC_FW_MSIX_ENTRY); /* Make sure disabling logs message is sent, must be done here to * avoid risk of completing without a running interrupt. */ fbnic_mbx_flush_tx(fbd); - - /* Reset the mailboxes to the initialized state */ fbnic_mbx_clean(fbd); } +/** + * fbnic_fw_free_mbx - Disable mailbox and place it in standby state + * @fbd: Pointer to device to disable + * + * This function will disable the mailbox interrupt, free any messages still + * in the mailbox and place it into a disabled state. The firmware is + * expected to see the update and assume that the host is in the reset state. + **/ +void fbnic_fw_free_mbx(struct fbnic_dev *fbd) +{ + /* Vector has already been freed */ + if (!fbd->fw_msix_vector) + return; + + fbnic_fw_disable_mbx(fbd); + + /* Free the vector */ + free_irq(fbd->fw_msix_vector, fbd); + fbd->fw_msix_vector = 0; +} + static irqreturn_t fbnic_pcs_msix_intr(int __always_unused irq, void *data) { struct fbnic_dev *fbd = data; @@ -101,7 +137,7 @@ static irqreturn_t fbnic_pcs_msix_intr(int __always_unused irq, void *data) } /** - * fbnic_pcs_irq_enable - Configure the MAC to enable it to advertise link + * fbnic_pcs_request_irq - Configure the PCS to enable it to advertise link * @fbd: Pointer to device to initialize * * This function provides basic bringup for the MAC/PCS IRQ. For now the IRQ @@ -109,41 +145,61 @@ static irqreturn_t fbnic_pcs_msix_intr(int __always_unused irq, void *data) * * Return: non-zero on failure. **/ -int fbnic_pcs_irq_enable(struct fbnic_dev *fbd) +int fbnic_pcs_request_irq(struct fbnic_dev *fbd) { - u32 vector = fbd->pcs_msix_vector; - int err; + struct pci_dev *pdev = to_pci_dev(fbd->dev); + int vector, err; - /* Request the IRQ for MAC link vector. 
- * Map MAC cause to it, and unmask it + WARN_ON(fbd->pcs_msix_vector); + + vector = pci_irq_vector(pdev, FBNIC_PCS_MSIX_ENTRY); + if (vector < 0) + return vector; + + /* Request the IRQ for PCS link vector. + * Map PCS cause to it, and unmask it */ err = request_irq(vector, &fbnic_pcs_msix_intr, 0, fbd->netdev->name, fbd); if (err) return err; + /* Map and enable interrupt, unmask vector after link is configured */ fbnic_wr32(fbd, FBNIC_INTR_MSIX_CTRL(FBNIC_INTR_MSIX_CTRL_PCS_IDX), FBNIC_PCS_MSIX_ENTRY | FBNIC_INTR_MSIX_CTRL_ENABLE); + fbd->pcs_msix_vector = vector; + return 0; } /** - * fbnic_pcs_irq_disable - Teardown the MAC IRQ to prepare for stopping + * fbnic_pcs_free_irq - Teardown the PCS IRQ to prepare for stopping * @fbd: Pointer to device that is stopping * - * This function undoes the work done in fbnic_pcs_irq_enable and prepares + * This function undoes the work done in fbnic_pcs_request_irq and prepares * the device to no longer receive traffic on the host interface. **/ -void fbnic_pcs_irq_disable(struct fbnic_dev *fbd) +void fbnic_pcs_free_irq(struct fbnic_dev *fbd) { + /* Vector has already been freed */ + if (!fbd->pcs_msix_vector) + return; + /* Disable interrupt */ fbnic_wr32(fbd, FBNIC_INTR_MSIX_CTRL(FBNIC_INTR_MSIX_CTRL_PCS_IDX), FBNIC_PCS_MSIX_ENTRY); + fbnic_wrfl(fbd); + + /* Synchronize IRQ to prevent race that would unmask vector */ + synchronize_irq(fbd->pcs_msix_vector); + + /* Mask the vector */ fbnic_wr32(fbd, FBNIC_INTR_MASK_SET(0), 1u << FBNIC_PCS_MSIX_ENTRY); /* Free the vector */ free_irq(fbd->pcs_msix_vector, fbd); + fbd->pcs_msix_vector = 0; } void fbnic_synchronize_irq(struct fbnic_dev *fbd, int nr) @@ -226,9 +282,6 @@ void fbnic_free_irqs(struct fbnic_dev *fbd) { struct pci_dev *pdev = to_pci_dev(fbd->dev); - fbd->pcs_msix_vector = 0; - fbd->fw_msix_vector = 0; - fbd->num_irqs = 0; pci_free_irq_vectors(pdev); @@ -254,8 +307,5 @@ int fbnic_alloc_irqs(struct fbnic_dev *fbd) fbd->num_irqs = num_irqs; - fbd->pcs_msix_vector = pci_irq_vector(pdev, FBNIC_PCS_MSIX_ENTRY); - fbd->fw_msix_vector = pci_irq_vector(pdev, FBNIC_FW_MSIX_ENTRY); - return 0; } diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c index 14291401f463..10e108c1fcd0 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c @@ -79,12 +79,6 @@ static void fbnic_mac_init_axi(struct fbnic_dev *fbd) fbnic_init_readrq(fbd, FBNIC_QM_RNI_RBP_CTL, cls, readrq); fbnic_init_mps(fbd, FBNIC_QM_RNI_RDE_CTL, cls, mps); fbnic_init_mps(fbd, FBNIC_QM_RNI_RCM_CTL, cls, mps); - - /* Enable XALI AR/AW outbound */ - wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AW_CFG, - FBNIC_PUL_OB_TLP_HDR_AW_CFG_BME); - wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AR_CFG, - FBNIC_PUL_OB_TLP_HDR_AR_CFG_BME); } static void fbnic_mac_init_qm(struct fbnic_dev *fbd) @@ -693,13 +687,10 @@ static int fbnic_mac_get_sensor_asic(struct fbnic_dev *fbd, int id, int err = 0, retries = 5; s32 *sensor; - fw_cmpl = kzalloc(sizeof(*fw_cmpl), GFP_KERNEL); + fw_cmpl = fbnic_fw_alloc_cmpl(FBNIC_TLV_MSG_ID_TSENE_READ_RESP); if (!fw_cmpl) return -ENOMEM; - /* Initialize completion and queue it for FW to process */ - fbnic_fw_init_cmpl(fw_cmpl, FBNIC_TLV_MSG_ID_TSENE_READ_RESP); - switch (id) { case FBNIC_SENSOR_TEMP: sensor = &fw_cmpl->u.tsene.millidegrees; @@ -750,7 +741,7 @@ static int fbnic_mac_get_sensor_asic(struct fbnic_dev *fbd, int id, *val = *sensor; exit_cleanup: - fbnic_fw_clear_compl(fbd); + fbnic_fw_clear_cmpl(fbd, fw_cmpl); exit_free: fbnic_fw_put_cmpl(fw_cmpl); 
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c index 79a01fdd1dd1..aa812c63d5af 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c @@ -44,9 +44,10 @@ int __fbnic_open(struct fbnic_net *fbn) if (err) goto time_stop; - err = fbnic_pcs_irq_enable(fbd); + err = fbnic_pcs_request_irq(fbd); if (err) goto time_stop; + /* Pull the BMC config and initialize the RPC */ fbnic_bmc_rpc_init(fbd); fbnic_rss_reinit(fbd, fbn); @@ -82,7 +83,7 @@ static int fbnic_stop(struct net_device *netdev) struct fbnic_net *fbn = netdev_priv(netdev); fbnic_down(fbn); - fbnic_pcs_irq_disable(fbn->fbd); + fbnic_pcs_free_irq(fbn->fbd); fbnic_time_stop(fbn); fbnic_fw_xmit_ownership_msg(fbn->fbd, false); @@ -403,12 +404,16 @@ static int fbnic_hwtstamp_set(struct net_device *netdev, static void fbnic_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64) { + u64 rx_bytes, rx_packets, rx_dropped = 0, rx_errors = 0; u64 tx_bytes, tx_packets, tx_dropped = 0; - u64 rx_bytes, rx_packets, rx_dropped = 0; struct fbnic_net *fbn = netdev_priv(dev); + struct fbnic_dev *fbd = fbn->fbd; struct fbnic_queue_stats *stats; + u64 rx_over = 0, rx_missed = 0; unsigned int start, i; + fbnic_get_hw_stats(fbd); + stats = &fbn->tx_stats; tx_bytes = stats->bytes; @@ -419,6 +424,12 @@ static void fbnic_get_stats64(struct net_device *dev, stats64->tx_packets = tx_packets; stats64->tx_dropped = tx_dropped; + /* Record drops from Tx HW Datapath */ + tx_dropped += fbd->hw_stats.tmi.drop.frames.value + + fbd->hw_stats.tti.cm_drop.frames.value + + fbd->hw_stats.tti.frame_drop.frames.value + + fbd->hw_stats.tti.tbi_drop.frames.value; + for (i = 0; i < fbn->num_tx_queues; i++) { struct fbnic_ring *txr = fbn->tx[i]; @@ -444,9 +455,34 @@ static void fbnic_get_stats64(struct net_device *dev, rx_packets = stats->packets; rx_dropped = stats->dropped; + spin_lock(&fbd->hw_stats_lock); + /* Record drops for the host FIFOs. + * 4: network to Host, 6: BMC to Host + * Exclude the BMC and MC FIFOs as those stats may contain drops + * due to unrelated items such as TCAM misses. They are still + * accessible through the ethtool stats. 
+ */ + i = FBNIC_RXB_FIFO_HOST; + rx_missed += fbd->hw_stats.rxb.fifo[i].drop.frames.value; + i = FBNIC_RXB_FIFO_BMC_TO_HOST; + rx_missed += fbd->hw_stats.rxb.fifo[i].drop.frames.value; + + for (i = 0; i < fbd->max_num_queues; i++) { + /* Report packets dropped due to CQ/BDQ being full/empty */ + rx_over += fbd->hw_stats.hw_q[i].rde_pkt_cq_drop.value; + rx_over += fbd->hw_stats.hw_q[i].rde_pkt_bdq_drop.value; + + /* Report packets with errors */ + rx_errors += fbd->hw_stats.hw_q[i].rde_pkt_err.value; + } + spin_unlock(&fbd->hw_stats_lock); + stats64->rx_bytes = rx_bytes; stats64->rx_packets = rx_packets; stats64->rx_dropped = rx_dropped; + stats64->rx_over_errors = rx_over; + stats64->rx_errors = rx_errors; + stats64->rx_missed_errors = rx_missed; for (i = 0; i < fbn->num_rx_queues; i++) { struct fbnic_ring *rxr = fbn->rx[i]; @@ -486,6 +522,7 @@ static void fbnic_get_queue_stats_rx(struct net_device *dev, int idx, { struct fbnic_net *fbn = netdev_priv(dev); struct fbnic_ring *rxr = fbn->rx[idx]; + struct fbnic_dev *fbd = fbn->fbd; struct fbnic_queue_stats *stats; u64 bytes, packets, alloc_fail; u64 csum_complete, csum_none; @@ -509,6 +546,15 @@ static void fbnic_get_queue_stats_rx(struct net_device *dev, int idx, rx->alloc_fail = alloc_fail; rx->csum_complete = csum_complete; rx->csum_none = csum_none; + + fbnic_get_hw_q_stats(fbd, fbd->hw_stats.hw_q); + + spin_lock(&fbd->hw_stats_lock); + rx->hw_drop_overruns = fbd->hw_stats.hw_q[idx].rde_pkt_cq_drop.value + + fbd->hw_stats.hw_q[idx].rde_pkt_bdq_drop.value; + rx->hw_drops = fbd->hw_stats.hw_q[idx].rde_pkt_err.value + + rx->hw_drop_overruns; + spin_unlock(&fbd->hw_stats_lock); } static void fbnic_get_queue_stats_tx(struct net_device *dev, int idx, diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c index 6cbbc2ee3e1f..249d3ef862d5 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c @@ -6,6 +6,7 @@ #include <linux/pci.h> #include <linux/rtnetlink.h> #include <linux/types.h> +#include <net/devlink.h> #include "fbnic.h" #include "fbnic_drvinfo.h" @@ -283,7 +284,7 @@ static int fbnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto free_irqs; } - err = fbnic_fw_enable_mbx(fbd); + err = fbnic_fw_request_mbx(fbd); if (err) { dev_err(&pdev->dev, "Firmware mailbox initialization failure\n"); @@ -292,6 +293,7 @@ static int fbnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) fbnic_devlink_register(fbd); fbnic_dbg_fbd_init(fbd); + spin_lock_init(&fbd->hw_stats_lock); /* Capture snapshot of hardware stats so netdev can calculate delta */ fbnic_reset_hw_stats(fbd); @@ -363,7 +365,7 @@ static void fbnic_remove(struct pci_dev *pdev) fbnic_hwmon_unregister(fbd); fbnic_dbg_fbd_exit(fbd); fbnic_devlink_unregister(fbd); - fbnic_fw_disable_mbx(fbd); + fbnic_fw_free_mbx(fbd); fbnic_free_irqs(fbd); fbnic_devlink_free(fbd); @@ -387,7 +389,11 @@ static int fbnic_pm_suspend(struct device *dev) rtnl_unlock(); null_uc_addr: - fbnic_fw_disable_mbx(fbd); + devl_lock(priv_to_devlink(fbd)); + + fbnic_fw_free_mbx(fbd); + + devl_unlock(priv_to_devlink(fbd)); /* Free the IRQs so they aren't trying to occupy sleeping CPUs */ fbnic_free_irqs(fbd); @@ -419,11 +425,15 @@ static int __fbnic_pm_resume(struct device *dev) fbd->mac->init_regs(fbd); + devl_lock(priv_to_devlink(fbd)); + /* Re-enable mailbox */ - err = fbnic_fw_enable_mbx(fbd); + err = fbnic_fw_request_mbx(fbd); if (err) goto err_free_irqs; + devl_unlock(priv_to_devlink(fbd)); + /* 
No netdev means there isn't a network interface to bring up */ if (fbnic_init_failure(fbd)) return 0; @@ -438,15 +448,15 @@ static int __fbnic_pm_resume(struct device *dev) if (netif_running(netdev)) { err = __fbnic_open(fbn); if (err) - goto err_disable_mbx; + goto err_free_mbx; } rtnl_unlock(); return 0; -err_disable_mbx: +err_free_mbx: rtnl_unlock(); - fbnic_fw_disable_mbx(fbd); + fbnic_fw_free_mbx(fbd); err_free_irqs: fbnic_free_irqs(fbd); err_invalidate_uc_addr: |
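/*
 * A minimal sketch of the aggregation done by the fbnic_get_stats64() hunk
 * above: per-queue hardware drop counters are summed into rtnl_link_stats64
 * fields while a stats spinlock is held.  The hw_q_counters /
 * hw_stats_snapshot structures are stand-ins assumed for illustration; the
 * driver's real state lives in fbd->hw_stats.hw_q[] under hw_stats_lock.
 */
#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct hw_q_counters {
	u64 pkt_err;	/* frames dropped due to packet errors */
	u64 cq_drop;	/* completion queue full */
	u64 bdq_drop;	/* buffer descriptor queue empty */
};

struct hw_stats_snapshot {
	spinlock_t lock;	/* protects the counters below */
	unsigned int num_queues;
	struct hw_q_counters q[];
};

static void hw_stats_fill_stats64(struct hw_stats_snapshot *hw,
				  struct rtnl_link_stats64 *stats64)
{
	u64 rx_over = 0, rx_errors = 0;
	unsigned int i;

	spin_lock(&hw->lock);
	for (i = 0; i < hw->num_queues; i++) {
		/* CQ/BDQ exhaustion is reported as overruns ... */
		rx_over += hw->q[i].cq_drop + hw->q[i].bdq_drop;
		/* ... while malformed frames count as errors */
		rx_errors += hw->q[i].pkt_err;
	}
	spin_unlock(&hw->lock);

	stats64->rx_over_errors = rx_over;
	stats64->rx_errors = rx_errors;
}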