Diffstat (limited to 'drivers/usb/host')
-rw-r--r--  drivers/usb/host/Kconfig          |  11
-rw-r--r--  drivers/usb/host/Makefile         |   4
-rw-r--r--  drivers/usb/host/ehci-fsl.c       |  25
-rw-r--r--  drivers/usb/host/uhci-platform.c  |   2
-rw-r--r--  drivers/usb/host/xhci-caps.h      |   4
-rw-r--r--  drivers/usb/host/xhci-dbgcap.c    |  19
-rw-r--r--  drivers/usb/host/xhci-dbgcap.h    |   3
-rw-r--r--  drivers/usb/host/xhci-debugfs.c   | 108
-rw-r--r--  drivers/usb/host/xhci-hub.c       |   2
-rw-r--r--  drivers/usb/host/xhci-mem.c       | 245
-rw-r--r--  drivers/usb/host/xhci-plat.c      |   2
-rw-r--r--  drivers/usb/host/xhci-ring.c      |  93
-rw-r--r--  drivers/usb/host/xhci-sideband.c  | 457
-rw-r--r--  drivers/usb/host/xhci-tegra.c     |   3
-rw-r--r--  drivers/usb/host/xhci.c           | 209
-rw-r--r--  drivers/usb/host/xhci.h           | 112
16 files changed, 1033 insertions(+), 266 deletions(-)
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index d011d6c753ed..109100cc77a3 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -104,6 +104,15 @@ config USB_XHCI_RZV2M
Say 'Y' to enable the support for the xHCI host controller
found in Renesas RZ/V2M SoC.
+config USB_XHCI_SIDEBAND
+ bool "xHCI support for sideband"
+ help
+ Say 'Y' to enable support for the xHCI sideband capability.
+ This provides a mechanism for a sideband datapath for payloads
+ associated with audio class endpoints, allowing an audio DSP to
+ use xHCI USB endpoints directly so the CPU can sleep while
+ playing audio.
+
config USB_XHCI_TEGRA
tristate "xHCI support for NVIDIA Tegra SoCs"
depends on PHY_TEGRA_XUSB
@@ -225,7 +234,7 @@ config USB_EHCI_HCD_OMAP
tristate "EHCI support for OMAP3 and later chips"
depends on ARCH_OMAP || COMPILE_TEST
depends on NOP_USB_XCEIV
- default y
+ default ARCH_OMAP
help
Enables support for the on-chip EHCI controller on
OMAP3 and later chips.
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index be4e5245c52f..4df946c05ba0 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -32,6 +32,10 @@ endif
xhci-rcar-hcd-y += xhci-rcar.o
xhci-rcar-hcd-$(CONFIG_USB_XHCI_RZV2M) += xhci-rzv2m.o
+ifneq ($(CONFIG_USB_XHCI_SIDEBAND),)
+ xhci-hcd-y += xhci-sideband.o
+endif
+
obj-$(CONFIG_USB_PCI) += pci-quirks.o
obj-$(CONFIG_USB_EHCI_HCD) += ehci-hcd.o
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 26f13278d4d6..6ed2fa5418a4 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -410,15 +410,13 @@ static int ehci_fsl_setup(struct usb_hcd *hcd)
return retval;
}
-struct ehci_fsl {
- struct ehci_hcd ehci;
-
-#ifdef CONFIG_PM
+struct ehci_fsl_priv {
/* Saved USB PHY settings, need to restore after deep sleep. */
u32 usb_ctrl;
-#endif
};
+#define hcd_to_ehci_fsl_priv(h) ((struct ehci_fsl_priv *) hcd_to_ehci(h)->priv)
+
#ifdef CONFIG_PM
#ifdef CONFIG_PPC_MPC512x
@@ -566,17 +564,10 @@ static inline int ehci_fsl_mpc512x_drv_resume(struct device *dev)
}
#endif /* CONFIG_PPC_MPC512x */
-static struct ehci_fsl *hcd_to_ehci_fsl(struct usb_hcd *hcd)
-{
- struct ehci_hcd *ehci = hcd_to_ehci(hcd);
-
- return container_of(ehci, struct ehci_fsl, ehci);
-}
-
static int ehci_fsl_drv_suspend(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
- struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd);
+ struct ehci_fsl_priv *priv = hcd_to_ehci_fsl_priv(hcd);
void __iomem *non_ehci = hcd->regs;
if (of_device_is_compatible(dev->parent->of_node,
@@ -589,14 +580,14 @@ static int ehci_fsl_drv_suspend(struct device *dev)
if (!fsl_deep_sleep())
return 0;
- ehci_fsl->usb_ctrl = ioread32be(non_ehci + FSL_SOC_USB_CTRL);
+ priv->usb_ctrl = ioread32be(non_ehci + FSL_SOC_USB_CTRL);
return 0;
}
static int ehci_fsl_drv_resume(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
- struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd);
+ struct ehci_fsl_priv *priv = hcd_to_ehci_fsl_priv(hcd);
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
void __iomem *non_ehci = hcd->regs;
@@ -612,7 +603,7 @@ static int ehci_fsl_drv_resume(struct device *dev)
usb_root_hub_lost_power(hcd->self.root_hub);
/* Restore USB PHY settings and enable the controller. */
- iowrite32be(ehci_fsl->usb_ctrl, non_ehci + FSL_SOC_USB_CTRL);
+ iowrite32be(priv->usb_ctrl, non_ehci + FSL_SOC_USB_CTRL);
ehci_reset(ehci);
ehci_fsl_reinit(ehci);
@@ -671,7 +662,7 @@ static int ehci_start_port_reset(struct usb_hcd *hcd, unsigned port)
#endif /* CONFIG_USB_OTG */
static const struct ehci_driver_overrides ehci_fsl_overrides __initconst = {
- .extra_priv_size = sizeof(struct ehci_fsl),
+ .extra_priv_size = sizeof(struct ehci_fsl_priv),
.reset = ehci_fsl_setup,
};
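The net effect of the ehci-fsl changes above: the driver's per-controller state moves from a container_of() wrapper around struct ehci_hcd into the variable-length private area the EHCI core reserves via extra_priv_size, and dropping the CONFIG_PM guard means usb_ctrl is always present. A minimal sketch of the resulting access pattern, using only names from the patch (the driver boilerplate around it is elided):

    /* The EHCI core allocates sizeof(struct ehci_fsl_priv) extra bytes at
     * hcd_to_ehci(hcd)->priv, so no container_of() wrapper is needed. */
    static int sketch_suspend(struct device *dev)	/* illustrative only */
    {
    	struct usb_hcd *hcd = dev_get_drvdata(dev);
    	struct ehci_fsl_priv *priv = hcd_to_ehci_fsl_priv(hcd);

    	/* save the PHY control register for restore after deep sleep */
    	priv->usb_ctrl = ioread32be(hcd->regs + FSL_SOC_USB_CTRL);
    	return 0;
    }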
diff --git a/drivers/usb/host/uhci-platform.c b/drivers/usb/host/uhci-platform.c
index a7c934404ebc..62318291f566 100644
--- a/drivers/usb/host/uhci-platform.c
+++ b/drivers/usb/host/uhci-platform.c
@@ -121,7 +121,7 @@ static int uhci_hcd_platform_probe(struct platform_device *pdev)
}
/* Get and enable clock if any specified */
- uhci->clk = devm_clk_get(&pdev->dev, NULL);
+ uhci->clk = devm_clk_get_optional(&pdev->dev, NULL);
if (IS_ERR(uhci->clk)) {
ret = PTR_ERR(uhci->clk);
goto err_rmr;
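The one-line uhci-platform change leans on the _optional clk API contract: when the device node specifies no clock, devm_clk_get_optional() returns NULL (which the clk API accepts as a no-op clock) rather than ERR_PTR(-ENOENT), so probe no longer fails on platforms without a clock. A hedged sketch of the difference (helper name is illustrative):

    #include <linux/clk.h>

    static int sketch_get_clk(struct device *dev)
    {
    	struct clk *clk = devm_clk_get_optional(dev, NULL);

    	if (IS_ERR(clk))	/* real errors (e.g. -EPROBE_DEFER) still fail */
    		return PTR_ERR(clk);

    	/* clk may be NULL here; clk_prepare_enable(NULL) succeeds, does nothing */
    	return clk_prepare_enable(clk);
    }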
diff --git a/drivers/usb/host/xhci-caps.h b/drivers/usb/host/xhci-caps.h
index f6b9a00a0ab9..4b8ff4815644 100644
--- a/drivers/usb/host/xhci-caps.h
+++ b/drivers/usb/host/xhci-caps.h
@@ -62,8 +62,8 @@
#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
-/* db_off bitmask - bits 0:1 reserved */
-#define DBOFF_MASK (~0x3)
+/* db_off bitmask - bits 31:2 Doorbell Array Offset */
+#define DBOFF_MASK (0xfffffffc)
/* run_regs_off bitmask - bits 0:4 reserved */
#define RTSOFF_MASK (~0x1f)
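The DBOFF_MASK rewrite is value-preserving on the 32-bit register; only the comment's point of view changes, documenting the field (bits 31:2, the Doorbell Array Offset) instead of the reserved bits 1:0. A quick compile-time check:

    #include <linux/build_bug.h>

    static_assert((u32)~0x3 == 0xfffffffc);	/* old and new masks are identical */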
diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c
index fd7895b24367..0d4ce5734165 100644
--- a/drivers/usb/host/xhci-dbgcap.c
+++ b/drivers/usb/host/xhci-dbgcap.c
@@ -823,6 +823,7 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
{
dma_addr_t deq;
union xhci_trb *evt;
+ enum evtreturn ret = EVT_DONE;
u32 ctrl, portsc;
bool update_erdp = false;
@@ -909,6 +910,7 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
break;
case TRB_TYPE(TRB_TRANSFER):
dbc_handle_xfer_event(dbc, evt);
+ ret = EVT_XFER_DONE;
break;
default:
break;
@@ -927,7 +929,7 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
lo_hi_writeq(deq, &dbc->regs->erdp);
}
- return EVT_DONE;
+ return ret;
}
static void xhci_dbc_handle_events(struct work_struct *work)
@@ -936,6 +938,7 @@ static void xhci_dbc_handle_events(struct work_struct *work)
struct xhci_dbc *dbc;
unsigned long flags;
unsigned int poll_interval;
+ unsigned long busypoll_timelimit;
dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work);
poll_interval = dbc->poll_interval;
@@ -954,11 +957,21 @@ static void xhci_dbc_handle_events(struct work_struct *work)
dbc->driver->disconnect(dbc);
break;
case EVT_DONE:
- /* set fast poll rate if there are pending data transfers */
+ /*
+ * Set fast poll rate if there are pending out transfers, or
+ * a transfer was recently processed.
+ */
+ busypoll_timelimit = dbc->xfer_timestamp +
+ msecs_to_jiffies(DBC_XFER_INACTIVITY_TIMEOUT);
+
if (!list_empty(&dbc->eps[BULK_OUT].list_pending) ||
- !list_empty(&dbc->eps[BULK_IN].list_pending))
+ time_is_after_jiffies(busypoll_timelimit))
poll_interval = 0;
break;
+ case EVT_XFER_DONE:
+ dbc->xfer_timestamp = jiffies;
+ poll_interval = 0;
+ break;
default:
dev_info(dbc->dev, "stop handling dbc events\n");
return;
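Taken together, the dbgcap changes implement a short busy-poll window: xhci_dbc_do_handle_events() now reports EVT_XFER_DONE when a transfer event was seen, the handler timestamps it, and subsequent EVT_DONE iterations keep polling at full rate until the new 10 ms inactivity timeout lapses. A condensed sketch of the policy, using the patch's names (helper is illustrative):

    #include <linux/jiffies.h>

    static unsigned int sketch_poll_interval(struct xhci_dbc *dbc)
    {
    	unsigned long limit = dbc->xfer_timestamp +
    			      msecs_to_jiffies(DBC_XFER_INACTIVITY_TIMEOUT);

    	/* fast-poll while a transfer completed within the last 10 ms */
    	return time_is_after_jiffies(limit) ? 0 : dbc->poll_interval;
    }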
diff --git a/drivers/usb/host/xhci-dbgcap.h b/drivers/usb/host/xhci-dbgcap.h
index 9dc8f4d8077c..47ac72c2286d 100644
--- a/drivers/usb/host/xhci-dbgcap.h
+++ b/drivers/usb/host/xhci-dbgcap.h
@@ -96,6 +96,7 @@ struct dbc_ep {
#define DBC_WRITE_BUF_SIZE 8192
#define DBC_POLL_INTERVAL_DEFAULT 64 /* milliseconds */
#define DBC_POLL_INTERVAL_MAX 5000 /* milliseconds */
+#define DBC_XFER_INACTIVITY_TIMEOUT 10 /* milliseconds */
/*
* Private structure for DbC hardware state:
*/
@@ -142,6 +143,7 @@ struct xhci_dbc {
enum dbc_state state;
struct delayed_work event_work;
unsigned int poll_interval; /* ms */
+ unsigned long xfer_timestamp;
unsigned resume_required:1;
struct dbc_ep eps[2];
@@ -187,6 +189,7 @@ struct dbc_request {
enum evtreturn {
EVT_ERR = -1,
EVT_DONE,
+ EVT_XFER_DONE,
EVT_GSER,
EVT_DISC,
};
diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c
index 1f5ef174abea..c6d44977193f 100644
--- a/drivers/usb/host/xhci-debugfs.c
+++ b/drivers/usb/host/xhci-debugfs.c
@@ -631,6 +631,112 @@ static void xhci_debugfs_create_ports(struct xhci_hcd *xhci,
}
}
+static int xhci_port_bw_show(struct xhci_hcd *xhci, u8 dev_speed,
+ struct seq_file *s)
+{
+ unsigned int num_ports;
+ unsigned int i;
+ int ret;
+ struct xhci_container_ctx *ctx;
+ struct usb_hcd *hcd = xhci_to_hcd(xhci);
+ struct device *dev = hcd->self.controller;
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
+ return ret;
+
+ num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
+
+ ctx = xhci_alloc_port_bw_ctx(xhci, 0);
+ if (!ctx) {
+ pm_runtime_put_sync(dev);
+ return -ENOMEM;
+ }
+
+ /* get roothub port bandwidth */
+ ret = xhci_get_port_bandwidth(xhci, ctx, dev_speed);
+ if (ret)
+ goto err_out;
+
+ /*
+ * Print each roothub port's available bandwidth. Per xHCI rev1.2 spec
+ * section 6.2.6, byte 0 is reserved.
+ */
+ for (i = 1; i < num_ports+1; i++)
+ seq_printf(s, "port[%d] available bw: %d%%.\n", i,
+ ctx->bytes[i]);
+err_out:
+ pm_runtime_put_sync(dev);
+ xhci_free_port_bw_ctx(xhci, ctx);
+ return ret;
+}
+
+static int xhci_ss_bw_show(struct seq_file *s, void *unused)
+{
+ int ret;
+ struct xhci_hcd *xhci = (struct xhci_hcd *)s->private;
+
+ ret = xhci_port_bw_show(xhci, USB_SPEED_SUPER, s);
+ return ret;
+}
+
+static int xhci_hs_bw_show(struct seq_file *s, void *unused)
+{
+ int ret;
+ struct xhci_hcd *xhci = (struct xhci_hcd *)s->private;
+
+ ret = xhci_port_bw_show(xhci, USB_SPEED_HIGH, s);
+ return ret;
+}
+
+static int xhci_fs_bw_show(struct seq_file *s, void *unused)
+{
+ int ret;
+ struct xhci_hcd *xhci = (struct xhci_hcd *)s->private;
+
+ ret = xhci_port_bw_show(xhci, USB_SPEED_FULL, s);
+ return ret;
+}
+
+static struct xhci_file_map bw_context_files[] = {
+ {"SS_BW", xhci_ss_bw_show, },
+ {"HS_BW", xhci_hs_bw_show, },
+ {"FS_BW", xhci_fs_bw_show, },
+};
+
+static int bw_context_open(struct inode *inode, struct file *file)
+{
+ int i;
+ struct xhci_file_map *f_map;
+ const char *file_name = file_dentry(file)->d_iname;
+
+ for (i = 0; i < ARRAY_SIZE(bw_context_files); i++) {
+ f_map = &bw_context_files[i];
+
+ if (strcmp(f_map->name, file_name) == 0)
+ break;
+ }
+
+ return single_open(file, f_map->show, inode->i_private);
+}
+
+static const struct file_operations bw_fops = {
+ .open = bw_context_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void xhci_debugfs_create_bandwidth(struct xhci_hcd *xhci,
+ struct dentry *parent)
+{
+ parent = debugfs_create_dir("port_bandwidth", parent);
+
+ xhci_debugfs_create_files(xhci, bw_context_files,
+ ARRAY_SIZE(bw_context_files),
+ xhci,
+ parent, &bw_fops);
+}
+
void xhci_debugfs_init(struct xhci_hcd *xhci)
{
struct device *dev = xhci_to_hcd(xhci)->self.controller;
@@ -681,6 +787,8 @@ void xhci_debugfs_init(struct xhci_hcd *xhci)
xhci->debugfs_slots = debugfs_create_dir("devices", xhci->debugfs_root);
xhci_debugfs_create_ports(xhci, xhci->debugfs_root);
+
+ xhci_debugfs_create_bandwidth(xhci, xhci->debugfs_root);
}
void xhci_debugfs_exit(struct xhci_hcd *xhci)
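Once this lands, the per-controller debugfs tree grows a port_bandwidth directory with SS_BW, HS_BW and FS_BW files; each read issues a Get Port Bandwidth command and prints one line per root-hub port. A hypothetical userspace read (the PCI device name in the path is an example; the parent directory is the controller's existing xhci debugfs root):

    #include <stdio.h>

    int main(void)
    {
    	char line[64];
    	FILE *f = fopen("/sys/kernel/debug/usb/xhci/0000:00:14.0/port_bandwidth/SS_BW", "r");

    	if (!f)
    		return 1;
    	while (fgets(line, sizeof(line), f))
    		fputs(line, stdout);	/* e.g. "port[1] available bw: 90%." */
    	fclose(f);
    	return 0;
    }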
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 486347776cb2..92bb84f8132a 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -1907,7 +1907,7 @@ int xhci_bus_resume(struct usb_hcd *hcd)
* prevent port event interrupts from interfering
* with usb2 port resume process
*/
- xhci_disable_interrupter(xhci->interrupters[0]);
+ xhci_disable_interrupter(xhci, xhci->interrupters[0]);
disabled_irq = true;
}
}
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index d698095fc88d..bd745a0f2f78 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -484,6 +484,35 @@ void xhci_free_container_ctx(struct xhci_hcd *xhci,
kfree(ctx);
}
+struct xhci_container_ctx *xhci_alloc_port_bw_ctx(struct xhci_hcd *xhci,
+ gfp_t flags)
+{
+ struct xhci_container_ctx *ctx;
+ struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
+
+ ctx = kzalloc_node(sizeof(*ctx), flags, dev_to_node(dev));
+ if (!ctx)
+ return NULL;
+
+ ctx->size = GET_PORT_BW_ARRAY_SIZE;
+
+ ctx->bytes = dma_pool_zalloc(xhci->port_bw_pool, flags, &ctx->dma);
+ if (!ctx->bytes) {
+ kfree(ctx);
+ return NULL;
+ }
+ return ctx;
+}
+
+void xhci_free_port_bw_ctx(struct xhci_hcd *xhci,
+ struct xhci_container_ctx *ctx)
+{
+ if (!ctx)
+ return;
+ dma_pool_free(xhci->port_bw_pool, ctx->bytes, ctx->dma);
+ kfree(ctx);
+}
+
struct xhci_input_control_ctx *xhci_get_input_control_ctx(
struct xhci_container_ctx *ctx)
{
@@ -1802,10 +1831,10 @@ xhci_remove_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
*/
if (ir->ir_set) {
tmp = readl(&ir->ir_set->erst_size);
- tmp &= ERST_SIZE_MASK;
+ tmp &= ~ERST_SIZE_MASK;
writel(tmp, &ir->ir_set->erst_size);
- xhci_write_64(xhci, ERST_EHB, &ir->ir_set->erst_dequeue);
+ xhci_update_erst_dequeue(xhci, ir, true);
}
}
@@ -1848,6 +1877,11 @@ void xhci_remove_secondary_interrupter(struct usb_hcd *hcd, struct xhci_interrup
return;
}
+ /*
+ * Cleanup secondary interrupter to ensure there are no pending events.
+ * This also updates event ring dequeue pointer back to the start.
+ */
+ xhci_skip_sec_intr_events(xhci, ir->event_ring, ir);
intr_num = ir->intr_num;
xhci_remove_interrupter(xhci, ir);
@@ -1907,6 +1941,11 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Freed small stream array pool");
+ dma_pool_destroy(xhci->port_bw_pool);
+ xhci->port_bw_pool = NULL;
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Freed xhci port bw array pool");
+
dma_pool_destroy(xhci->medium_streams_pool);
xhci->medium_streams_pool = NULL;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
@@ -2282,37 +2321,25 @@ xhci_alloc_interrupter(struct xhci_hcd *xhci, unsigned int segs, gfp_t flags)
return ir;
}
-static int
-xhci_add_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir,
- unsigned int intr_num)
+void xhci_add_interrupter(struct xhci_hcd *xhci, unsigned int intr_num)
{
+ struct xhci_interrupter *ir;
u64 erst_base;
u32 erst_size;
- if (intr_num >= xhci->max_interrupters) {
- xhci_warn(xhci, "Can't add interrupter %d, max interrupters %d\n",
- intr_num, xhci->max_interrupters);
- return -EINVAL;
- }
-
- if (xhci->interrupters[intr_num]) {
- xhci_warn(xhci, "Interrupter %d\n already set up", intr_num);
- return -EINVAL;
- }
-
- xhci->interrupters[intr_num] = ir;
+ ir = xhci->interrupters[intr_num];
ir->intr_num = intr_num;
ir->ir_set = &xhci->run_regs->ir_set[intr_num];
/* set ERST count with the number of entries in the segment table */
erst_size = readl(&ir->ir_set->erst_size);
- erst_size &= ERST_SIZE_MASK;
+ erst_size &= ~ERST_SIZE_MASK;
erst_size |= ir->event_ring->num_segs;
writel(erst_size, &ir->ir_set->erst_size);
erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base);
- erst_base &= ERST_BASE_RSVDP;
- erst_base |= ir->erst.erst_dma_addr & ~ERST_BASE_RSVDP;
+ erst_base &= ~ERST_BASE_ADDRESS_MASK;
+ erst_base |= ir->erst.erst_dma_addr & ERST_BASE_ADDRESS_MASK;
if (xhci->quirks & XHCI_WRITE_64_HI_LO)
hi_lo_writeq(erst_base, &ir->ir_set->erst_base);
else
@@ -2320,20 +2347,19 @@ xhci_add_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir,
/* Set the event ring dequeue address of this interrupter */
xhci_set_hc_event_deq(xhci, ir);
-
- return 0;
}
struct xhci_interrupter *
xhci_create_secondary_interrupter(struct usb_hcd *hcd, unsigned int segs,
- u32 imod_interval)
+ u32 imod_interval, unsigned int intr_num)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct xhci_interrupter *ir;
unsigned int i;
int err = -ENOSPC;
- if (!xhci->interrupters || xhci->max_interrupters <= 1)
+ if (!xhci->interrupters || xhci->max_interrupters <= 1 ||
+ intr_num >= xhci->max_interrupters)
return NULL;
ir = xhci_alloc_interrupter(xhci, segs, GFP_KERNEL);
@@ -2341,15 +2367,23 @@ xhci_create_secondary_interrupter(struct usb_hcd *hcd, unsigned int segs,
return NULL;
spin_lock_irq(&xhci->lock);
-
- /* Find available secondary interrupter, interrupter 0 is reserved for primary */
- for (i = 1; i < xhci->max_interrupters; i++) {
- if (xhci->interrupters[i] == NULL) {
- err = xhci_add_interrupter(xhci, ir, i);
- break;
+ if (!intr_num) {
+ /* Find available secondary interrupter, interrupter 0 is reserved for primary */
+ for (i = 1; i < xhci->max_interrupters; i++) {
+ if (!xhci->interrupters[i]) {
+ xhci->interrupters[i] = ir;
+ xhci_add_interrupter(xhci, i);
+ err = 0;
+ break;
+ }
+ }
+ } else {
+ if (!xhci->interrupters[intr_num]) {
+ xhci->interrupters[intr_num] = ir;
+ xhci_add_interrupter(xhci, intr_num);
+ err = 0;
}
}
-
spin_unlock_irq(&xhci->lock);
if (err) {
@@ -2359,78 +2393,32 @@ xhci_create_secondary_interrupter(struct usb_hcd *hcd, unsigned int segs,
return NULL;
}
- err = xhci_set_interrupter_moderation(ir, imod_interval);
- if (err)
- xhci_warn(xhci, "Failed to set interrupter %d moderation to %uns\n",
- i, imod_interval);
+ xhci_set_interrupter_moderation(ir, imod_interval);
xhci_dbg(xhci, "Add secondary interrupter %d, max interrupters %d\n",
- i, xhci->max_interrupters);
+ ir->intr_num, xhci->max_interrupters);
return ir;
}
EXPORT_SYMBOL_GPL(xhci_create_secondary_interrupter);
-static void xhci_hcd_page_size(struct xhci_hcd *xhci)
-{
- u32 page_size;
-
- page_size = readl(&xhci->op_regs->page_size) & XHCI_PAGE_SIZE_MASK;
- if (!is_power_of_2(page_size)) {
- xhci_warn(xhci, "Invalid page size register = 0x%x\n", page_size);
- /* Fallback to 4K page size, since that's common */
- page_size = 1;
- }
-
- xhci->page_size = page_size << 12;
- xhci_dbg_trace(xhci, trace_xhci_dbg_init, "HCD page size set to %iK",
- xhci->page_size >> 10);
-}
-
int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
- struct xhci_interrupter *ir;
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
dma_addr_t dma;
- unsigned int val, val2;
- u64 val_64;
- u32 temp;
- int i;
-
- INIT_LIST_HEAD(&xhci->cmd_list);
-
- /* init command timeout work */
- INIT_DELAYED_WORK(&xhci->cmd_timer, xhci_handle_command_timeout);
- init_completion(&xhci->cmd_ring_stop_completion);
-
- xhci_hcd_page_size(xhci);
-
- /*
- * Program the Number of Device Slots Enabled field in the CONFIG
- * register with the max value of slots the HC can handle.
- */
- val = HCS_MAX_SLOTS(readl(&xhci->cap_regs->hcs_params1));
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "// xHC can handle at most %d device slots.", val);
- val2 = readl(&xhci->op_regs->config_reg);
- val |= (val2 & ~HCS_SLOTS_MASK);
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "// Setting Max device slots reg = 0x%x.", val);
- writel(val, &xhci->op_regs->config_reg);
/*
* xHCI section 5.4.6 - Device Context array must be
* "physically contiguous and 64-byte (cache line) aligned".
*/
- xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
- flags);
+ xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma, flags);
if (!xhci->dcbaa)
goto fail;
+
xhci->dcbaa->dma = dma;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "// Device context base array address = 0x%pad (DMA), %p (virt)",
- &xhci->dcbaa->dma, xhci->dcbaa);
- xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
+ "Device context base array address = 0x%pad (DMA), %p (virt)",
+ &xhci->dcbaa->dma, xhci->dcbaa);
/*
* Initialize the ring segment pool. The ring must be a contiguous
@@ -2446,92 +2434,77 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
else
xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE, xhci->page_size);
+ if (!xhci->segment_pool)
+ goto fail;
/* See Table 46 and Note on Figure 55 */
- xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
- 2112, 64, xhci->page_size);
- if (!xhci->segment_pool || !xhci->device_pool)
+ xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev, 2112, 64,
+ xhci->page_size);
+ if (!xhci->device_pool)
goto fail;
- /* Linear stream context arrays don't have any boundary restrictions,
+ /*
+ * Linear stream context arrays don't have any boundary restrictions,
* and only need to be 16-byte aligned.
*/
- xhci->small_streams_pool =
- dma_pool_create("xHCI 256 byte stream ctx arrays",
- dev, SMALL_STREAM_ARRAY_SIZE, 16, 0);
- xhci->medium_streams_pool =
- dma_pool_create("xHCI 1KB stream ctx arrays",
- dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0);
- /* Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE
- * will be allocated with dma_alloc_coherent()
+ xhci->small_streams_pool = dma_pool_create("xHCI 256 byte stream ctx arrays",
+ dev, SMALL_STREAM_ARRAY_SIZE, 16, 0);
+ if (!xhci->small_streams_pool)
+ goto fail;
+
+ /*
+ * Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE will be
+ * allocated with dma_alloc_coherent().
*/
- if (!xhci->small_streams_pool || !xhci->medium_streams_pool)
+ xhci->medium_streams_pool = dma_pool_create("xHCI 1KB stream ctx arrays",
+ dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0);
+ if (!xhci->medium_streams_pool)
+ goto fail;
+
+ /*
+ * Per xHCI rev1.2 spec section 5.3.3, the maximum number of ports is 255.
+ * Per section 6.4.3.14, the port bandwidth context buffer needs to be
+ * 16-byte aligned.
+ */
+ xhci->port_bw_pool = dma_pool_create("xHCI 256 port bw ctx arrays",
+ dev, GET_PORT_BW_ARRAY_SIZE, 16, 0);
+ if (!xhci->port_bw_pool)
goto fail;
/* Set up the command ring to have one segments for now. */
xhci->cmd_ring = xhci_ring_alloc(xhci, 1, TYPE_COMMAND, 0, flags);
if (!xhci->cmd_ring)
goto fail;
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "Allocated command ring at %p", xhci->cmd_ring);
- xhci_dbg_trace(xhci, trace_xhci_dbg_init, "First segment DMA is 0x%pad",
- &xhci->cmd_ring->first_seg->dma);
- /* Set the address in the Command Ring Control register */
- val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
- val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
- (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
- xhci->cmd_ring->cycle_state;
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "// Setting command ring address to 0x%016llx", val_64);
- xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Allocated command ring at %p", xhci->cmd_ring);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "First segment DMA is 0x%pad",
+ &xhci->cmd_ring->first_seg->dma);
- /* Reserve one command ring TRB for disabling LPM.
+ /*
+ * Reserve one command ring TRB for disabling LPM.
* Since the USB core grabs the shared usb_bus bandwidth mutex before
* disabling LPM, we only need to reserve one TRB for all devices.
*/
xhci->cmd_ring_reserved_trbs++;
- val = readl(&xhci->cap_regs->db_off);
- val &= DBOFF_MASK;
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "// Doorbell array is located at offset 0x%x from cap regs base addr",
- val);
- xhci->dba = (void __iomem *) xhci->cap_regs + val;
-
/* Allocate and set up primary interrupter 0 with an event ring. */
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "Allocating primary event ring");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Allocating primary event ring");
xhci->interrupters = kcalloc_node(xhci->max_interrupters, sizeof(*xhci->interrupters),
flags, dev_to_node(dev));
-
- ir = xhci_alloc_interrupter(xhci, 0, flags);
- if (!ir)
+ if (!xhci->interrupters)
goto fail;
- if (xhci_add_interrupter(xhci, ir, 0))
+ xhci->interrupters[0] = xhci_alloc_interrupter(xhci, 0, flags);
+ if (!xhci->interrupters[0])
goto fail;
- ir->isoc_bei_interval = AVOID_BEI_INTERVAL_MAX;
-
- for (i = 0; i < MAX_HC_SLOTS; i++)
- xhci->devs[i] = NULL;
-
if (scratchpad_alloc(xhci, flags))
goto fail;
+
if (xhci_setup_port_arrays(xhci, flags))
goto fail;
- /* Enable USB 3.0 device notifications for function remote wake, which
- * is necessary for allowing USB 3.0 devices to do remote wakeup from
- * U3 (device suspend).
- */
- temp = readl(&xhci->op_regs->dev_notification);
- temp &= ~DEV_NOTE_MASK;
- temp |= DEV_NOTE_FWAKE;
- writel(temp, &xhci->op_regs->dev_notification);
-
return 0;
fail:
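Note the contract change for xhci_add_interrupter(): it no longer validates intr_num or installs the interrupter pointer itself; the caller is expected to do both under xhci->lock before calling it to program the registers. The secondary-interrupter path above reduces to roughly:

    spin_lock_irq(&xhci->lock);
    if (intr_num < xhci->max_interrupters && !xhci->interrupters[intr_num]) {
    	xhci->interrupters[intr_num] = ir;
    	xhci_add_interrupter(xhci, intr_num);	/* programs ERSTSZ/ERSTBA/ERDP */
    	err = 0;
    }
    spin_unlock_irq(&xhci->lock);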
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 3155e3a842da..6dab142e7278 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -267,6 +267,8 @@ int xhci_plat_probe(struct platform_device *pdev, struct device *sysdev, const s
device_property_read_u32(tmpdev, "imod-interval-ns",
&xhci->imod_interval);
+ device_property_read_u16(tmpdev, "num-hc-interrupters",
+ &xhci->max_interrupters);
}
/*
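device_property_read_u16() leaves xhci->max_interrupters untouched when the property is absent, so existing platforms keep the HCSPARAMS1-derived value, and the core is expected to clamp an over-large request to what the hardware reports. A sketch of the consumer side (mirrors the hunk above):

    #include <linux/property.h>

    static void sketch_read_interrupters(struct device *dev, struct xhci_hcd *xhci)
    {
    	/* on a missing property the read fails and the value is unchanged */
    	device_property_read_u16(dev, "num-hc-interrupters",
    				 &xhci->max_interrupters);
    }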
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index b906bc2eea5f..e038ad3375dc 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -699,7 +699,7 @@ static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci,
int new_cycle;
dma_addr_t addr;
u64 hw_dequeue;
- bool cycle_found = false;
+ bool hw_dequeue_found = false;
bool td_last_trb_found = false;
u32 trb_sct = 0;
int ret;
@@ -715,25 +715,24 @@ static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci,
hw_dequeue = xhci_get_hw_deq(xhci, dev, ep_index, stream_id);
new_seg = ep_ring->deq_seg;
new_deq = ep_ring->dequeue;
- new_cycle = hw_dequeue & 0x1;
+ new_cycle = le32_to_cpu(td->end_trb->generic.field[3]) & TRB_CYCLE;
/*
- * We want to find the pointer, segment and cycle state of the new trb
- * (the one after current TD's end_trb). We know the cycle state at
- * hw_dequeue, so walk the ring until both hw_dequeue and end_trb are
- * found.
+ * Walk the ring until both the next TRB and hw_dequeue are found (don't
+ * move hw_dequeue back if it went forward due to a HW bug). Cycle state
+ * is loaded from a known good TRB, track later toggles to maintain it.
*/
do {
- if (!cycle_found && xhci_trb_virt_to_dma(new_seg, new_deq)
+ if (!hw_dequeue_found && xhci_trb_virt_to_dma(new_seg, new_deq)
== (dma_addr_t)(hw_dequeue & ~0xf)) {
- cycle_found = true;
+ hw_dequeue_found = true;
if (td_last_trb_found)
break;
}
if (new_deq == td->end_trb)
td_last_trb_found = true;
- if (cycle_found && trb_is_link(new_deq) &&
+ if (td_last_trb_found && trb_is_link(new_deq) &&
link_trb_toggles_cycle(new_deq))
new_cycle ^= 0x1;
@@ -745,7 +744,7 @@ static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci,
return -EINVAL;
}
- } while (!cycle_found || !td_last_trb_found);
+ } while (!hw_dequeue_found || !td_last_trb_found);
/* Don't update the ring cycle state for the producer (us). */
addr = xhci_trb_virt_to_dma(new_seg, new_deq);
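The rename from cycle_found to hw_dequeue_found tracks the real fix here: the cycle bit is now seeded from td->end_trb, a TRB the driver itself wrote at enqueue time and therefore trustworthy even if the hardware dequeue pointer raced ahead, and it only toggles once the walk has passed the TD and crosses a toggling link TRB. In short:

    /* seed from a TRB we wrote ourselves, not from the HW dequeue pointer */
    new_cycle = le32_to_cpu(td->end_trb->generic.field[3]) & TRB_CYCLE;
    /* ... walk the ring ... */
    /* toggle only for link TRBs encountered after the TD's last TRB */
    if (td_last_trb_found && trb_is_link(new_deq) && link_trb_toggles_cycle(new_deq))
    	new_cycle ^= 0x1;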
@@ -1899,6 +1898,8 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
case TRB_NEC_GET_FW:
xhci_handle_cmd_nec_get_fw(xhci, event);
break;
+ case TRB_GET_BW:
+ break;
default:
/* Skip over unknown commands on the event ring */
xhci_info(xhci, "INFO unknown command type %d\n", cmd_type);
@@ -2978,9 +2979,6 @@ debug_finding_td:
(unsigned long long)xhci_trb_virt_to_dma(td->start_seg, td->start_trb),
(unsigned long long)xhci_trb_virt_to_dma(td->end_seg, td->end_trb));
- xhci_for_each_ring_seg(ep_ring->first_seg, ep_seg)
- xhci_warn(xhci, "Ring seg %u dma %pad\n", ep_seg->num, &ep_seg->dma);
-
return -ESHUTDOWN;
err_out:
@@ -3051,9 +3049,9 @@ static int xhci_handle_event_trb(struct xhci_hcd *xhci, struct xhci_interrupter
* - When all events have finished
* - To avoid "Event Ring Full Error" condition
*/
-static void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
- struct xhci_interrupter *ir,
- bool clear_ehb)
+void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
+ struct xhci_interrupter *ir,
+ bool clear_ehb)
{
u64 temp_64;
dma_addr_t deq;
@@ -3084,11 +3082,14 @@ static void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
static void xhci_clear_interrupt_pending(struct xhci_interrupter *ir)
{
if (!ir->ip_autoclear) {
- u32 irq_pending;
+ u32 iman;
+
+ iman = readl(&ir->ir_set->iman);
+ iman |= IMAN_IP;
+ writel(iman, &ir->ir_set->iman);
- irq_pending = readl(&ir->ir_set->irq_pending);
- irq_pending |= IMAN_IP;
- writel(irq_pending, &ir->ir_set->irq_pending);
+ /* Read operation to guarantee the write has been flushed from posted buffers */
+ readl(&ir->ir_set->iman);
}
}
@@ -3096,10 +3097,11 @@ static void xhci_clear_interrupt_pending(struct xhci_interrupter *ir)
* Handle all OS-owned events on an interrupter event ring. It may drop
* and reacquire xhci->lock between events.
*/
-static int xhci_handle_events(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
+static int xhci_handle_events(struct xhci_hcd *xhci, struct xhci_interrupter *ir,
+ bool skip_events)
{
int event_loop = 0;
- int err;
+ int err = 0;
u64 temp;
xhci_clear_interrupt_pending(ir);
@@ -3122,7 +3124,8 @@ static int xhci_handle_events(struct xhci_hcd *xhci, struct xhci_interrupter *ir
/* Process all OS owned event TRBs on this event ring */
while (unhandled_event_trb(ir->event_ring)) {
- err = xhci_handle_event_trb(xhci, ir, ir->event_ring->dequeue);
+ if (!skip_events)
+ err = xhci_handle_event_trb(xhci, ir, ir->event_ring->dequeue);
/*
* If half a segment of events have been handled in one go then
@@ -3150,6 +3153,37 @@ static int xhci_handle_events(struct xhci_hcd *xhci, struct xhci_interrupter *ir
}
/*
+ * Move the event ring dequeue pointer to skip events kept in the secondary
+ * event ring. This is used to ensure that pending events in the ring are
+ * acknowledged, so the xHCI HCD can properly enter suspend/resume. The
+ * secondary ring is typically maintained by an external component.
+ */
+void xhci_skip_sec_intr_events(struct xhci_hcd *xhci,
+ struct xhci_ring *ring, struct xhci_interrupter *ir)
+{
+ union xhci_trb *current_trb;
+ u64 erdp_reg;
+ dma_addr_t deq;
+
+ /* disable irq, ack pending interrupt and ack all pending events */
+ xhci_disable_interrupter(xhci, ir);
+
+ /* last acked event trb is in erdp reg */
+ erdp_reg = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
+ deq = (dma_addr_t)(erdp_reg & ERST_PTR_MASK);
+ if (!deq) {
+ xhci_err(xhci, "event ring handling not required\n");
+ return;
+ }
+
+ current_trb = ir->event_ring->dequeue;
+ /* read cycle state of the last acked trb to find out CCS */
+ ring->cycle_state = le32_to_cpu(current_trb->event_cmd.flags) & TRB_CYCLE;
+
+ xhci_handle_events(xhci, ir, true);
+}
+
+/*
* xHCI spec says we can get an interrupt, and if the HC has an error condition,
* we might get bad data out of the event ring. Section 4.10.2.7 has a list of
* indicators of an event TRB error, but we check the status *first* to be safe.
@@ -3193,7 +3227,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
writel(status, &xhci->op_regs->status);
/* This is the handler of the primary interrupter */
- xhci_handle_events(xhci, xhci->interrupters[0]);
+ xhci_handle_events(xhci, xhci->interrupters[0], false);
out:
spin_unlock(&xhci->lock);
@@ -4415,6 +4449,17 @@ int xhci_queue_configure_endpoint(struct xhci_hcd *xhci,
command_must_succeed);
}
+/* Queue a get root hub port bandwidth command TRB */
+int xhci_queue_get_port_bw(struct xhci_hcd *xhci,
+ struct xhci_command *cmd, dma_addr_t in_ctx_ptr,
+ u8 dev_speed, bool command_must_succeed)
+{
+ return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
+ upper_32_bits(in_ctx_ptr), 0,
+ TRB_TYPE(TRB_GET_BW) | DEV_SPEED_FOR_TRB(dev_speed),
+ command_must_succeed);
+}
+
/* Queue an evaluate context command TRB */
int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd,
dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed)
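For reference, the Get Port Bandwidth TRB queued above follows xHCI rev1.2 section 6.4.3.14: the parameter component carries the Port Bandwidth Context pointer and the control component carries the TRB type plus the target device speed (slot ID 0 targets the root-hub ports). The TRB_GET_BW case added to handle_cmd_completion() is an intentional no-op so the event is not logged as an unknown command; waiters consume the context buffer directly, roughly:

    /* Sketch: consume the Port Bandwidth Context the xHC filled in (xHCI
     * rev1.2 sec 6.2.6: one byte per root-hub port, the percentage of
     * bandwidth still available; byte 0 is reserved). */
    static void sketch_print_port_bw(struct xhci_container_ctx *ctx,
    				 unsigned int num_ports)
    {
    	unsigned int i;

    	for (i = 1; i <= num_ports; i++)
    		pr_info("port[%u] available bw: %u%%\n", i, ctx->bytes[i]);
    }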
diff --git a/drivers/usb/host/xhci-sideband.c b/drivers/usb/host/xhci-sideband.c
new file mode 100644
index 000000000000..d49f9886dd84
--- /dev/null
+++ b/drivers/usb/host/xhci-sideband.c
@@ -0,0 +1,457 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * xHCI host controller sideband support
+ *
+ * Copyright (c) 2023-2025, Intel Corporation.
+ *
+ * Author: Mathias Nyman
+ */
+
+#include <linux/usb/xhci-sideband.h>
+#include <linux/dma-direct.h>
+
+#include "xhci.h"
+
+/* sideband internal helpers */
+static struct sg_table *
+xhci_ring_to_sgtable(struct xhci_sideband *sb, struct xhci_ring *ring)
+{
+ struct xhci_segment *seg;
+ struct sg_table *sgt;
+ unsigned int n_pages;
+ struct page **pages;
+ struct device *dev;
+ size_t sz;
+ int i;
+
+ dev = xhci_to_hcd(sb->xhci)->self.sysdev;
+ sz = ring->num_segs * TRB_SEGMENT_SIZE;
+ n_pages = PAGE_ALIGN(sz) >> PAGE_SHIFT;
+ pages = kvmalloc_array(n_pages, sizeof(struct page *), GFP_KERNEL);
+ if (!pages)
+ return NULL;
+
+ sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt) {
+ kvfree(pages);
+ return NULL;
+ }
+
+ seg = ring->first_seg;
+ if (!seg)
+ goto err;
+ /*
+ * Rings can potentially have multiple segments, create an array that
+ * carries page references to allocated segments. Utilize the
+ * sg_alloc_table_from_pages() to create the sg table, and to ensure
+ * that page links are created.
+ */
+ for (i = 0; i < ring->num_segs; i++) {
+ dma_get_sgtable(dev, sgt, seg->trbs, seg->dma,
+ TRB_SEGMENT_SIZE);
+ pages[i] = sg_page(sgt->sgl);
+ sg_free_table(sgt);
+ seg = seg->next;
+ }
+
+ if (sg_alloc_table_from_pages(sgt, pages, n_pages, 0, sz, GFP_KERNEL))
+ goto err;
+
+ /*
+ * Save first segment dma address to sg dma_address field for the sideband
+ * client to have access to the IOVA of the ring.
+ */
+ sg_dma_address(sgt->sgl) = ring->first_seg->dma;
+
+ return sgt;
+
+err:
+ kvfree(pages);
+ kfree(sgt);
+
+ return NULL;
+}
+
+static void
+__xhci_sideband_remove_endpoint(struct xhci_sideband *sb, struct xhci_virt_ep *ep)
+{
+ /*
+ * Issue a stop endpoint command when an endpoint is removed.
+ * The stop ep cmd handler will handle the ring cleanup.
+ */
+ xhci_stop_endpoint_sync(sb->xhci, ep, 0, GFP_KERNEL);
+
+ ep->sideband = NULL;
+ sb->eps[ep->ep_index] = NULL;
+}
+
+/* sideband api functions */
+
+/**
+ * xhci_sideband_notify_ep_ring_free - notify client of xfer ring free
+ * @sb: sideband instance for this usb device
+ * @ep_index: usb endpoint index
+ *
+ * Notifies the xHCI sideband client driver that an xHCI transfer ring is
+ * about to be freed. This allows the client to ensure that all transfers
+ * are completed.
+ *
+ * The callback should be synchronous, as the ring free happens after.
+ */
+void xhci_sideband_notify_ep_ring_free(struct xhci_sideband *sb,
+ unsigned int ep_index)
+{
+ struct xhci_sideband_event evt;
+
+ evt.type = XHCI_SIDEBAND_XFER_RING_FREE;
+ evt.evt_data = &ep_index;
+
+ if (sb->notify_client)
+ sb->notify_client(sb->intf, &evt);
+}
+EXPORT_SYMBOL_GPL(xhci_sideband_notify_ep_ring_free);
+
+/**
+ * xhci_sideband_add_endpoint - add endpoint to sideband access list
+ * @sb: sideband instance for this usb device
+ * @host_ep: usb host endpoint
+ *
+ * Adds an endpoint to the list of sideband accessed endpoints for this usb
+ * device.
+ * After an endpoint is added the sideband client can get the endpoint transfer
+ * ring buffer by calling xhci_sideband_endpoint_buffer()
+ *
+ * Return: 0 on success, negative error otherwise.
+ */
+int
+xhci_sideband_add_endpoint(struct xhci_sideband *sb,
+ struct usb_host_endpoint *host_ep)
+{
+ struct xhci_virt_ep *ep;
+ unsigned int ep_index;
+
+ mutex_lock(&sb->mutex);
+ ep_index = xhci_get_endpoint_index(&host_ep->desc);
+ ep = &sb->vdev->eps[ep_index];
+
+ if (ep->ep_state & EP_HAS_STREAMS) {
+ mutex_unlock(&sb->mutex);
+ return -EINVAL;
+ }
+
+ /*
+ * Note, we don't know the DMA mask of the audio DSP device; if it's
+ * smaller than xhci's, the DSP won't be able to access the endpoint ring
+ * buffer. This could be solved by not allowing the audio class driver
+ * to add the endpoint the normal way, but instead offloading it immediately
+ * and letting this function add the endpoint and allocate the ring buffer
+ * with the smallest common DMA mask.
+ */
+ if (sb->eps[ep_index] || ep->sideband) {
+ mutex_unlock(&sb->mutex);
+ return -EBUSY;
+ }
+
+ ep->sideband = sb;
+ sb->eps[ep_index] = ep;
+ mutex_unlock(&sb->mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xhci_sideband_add_endpoint);
+
+/**
+ * xhci_sideband_remove_endpoint - remove endpoint from sideband access list
+ * @sb: sideband instance for this usb device
+ * @host_ep: usb host endpoint
+ *
+ * Removes an endpoint from the list of sideband accessed endpoints for this usb
+ * device.
+ * sideband client should no longer touch the endpoint transfer buffer after
+ * calling this.
+ *
+ * Return: 0 on success, negative error otherwise.
+ */
+int
+xhci_sideband_remove_endpoint(struct xhci_sideband *sb,
+ struct usb_host_endpoint *host_ep)
+{
+ struct xhci_virt_ep *ep;
+ unsigned int ep_index;
+
+ mutex_lock(&sb->mutex);
+ ep_index = xhci_get_endpoint_index(&host_ep->desc);
+ ep = sb->eps[ep_index];
+
+ if (!ep || !ep->sideband || ep->sideband != sb) {
+ mutex_unlock(&sb->mutex);
+ return -ENODEV;
+ }
+
+ __xhci_sideband_remove_endpoint(sb, ep);
+ xhci_initialize_ring_info(ep->ring);
+ mutex_unlock(&sb->mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xhci_sideband_remove_endpoint);
+
+int
+xhci_sideband_stop_endpoint(struct xhci_sideband *sb,
+ struct usb_host_endpoint *host_ep)
+{
+ struct xhci_virt_ep *ep;
+ unsigned int ep_index;
+
+ ep_index = xhci_get_endpoint_index(&host_ep->desc);
+ ep = sb->eps[ep_index];
+
+ if (!ep || !ep->sideband || ep->sideband != sb)
+ return -EINVAL;
+
+ return xhci_stop_endpoint_sync(sb->xhci, ep, 0, GFP_KERNEL);
+}
+EXPORT_SYMBOL_GPL(xhci_sideband_stop_endpoint);
+
+/**
+ * xhci_sideband_get_endpoint_buffer - gets the endpoint transfer buffer address
+ * @sb: sideband instance for this usb device
+ * @host_ep: usb host endpoint
+ *
+ * Returns the address of the endpoint buffer where the xHC reads queued
+ * transfer TRBs from. This is the starting address of the ring buffer where
+ * the sideband client should write TRBs to.
+ *
+ * Caller needs to free the returned sg_table
+ *
+ * Return: struct sg_table * if successful. NULL otherwise.
+ */
+struct sg_table *
+xhci_sideband_get_endpoint_buffer(struct xhci_sideband *sb,
+ struct usb_host_endpoint *host_ep)
+{
+ struct xhci_virt_ep *ep;
+ unsigned int ep_index;
+
+ ep_index = xhci_get_endpoint_index(&host_ep->desc);
+ ep = sb->eps[ep_index];
+
+ if (!ep || !ep->ring || !ep->sideband || ep->sideband != sb)
+ return NULL;
+
+ return xhci_ring_to_sgtable(sb, ep->ring);
+}
+EXPORT_SYMBOL_GPL(xhci_sideband_get_endpoint_buffer);
+
+/**
+ * xhci_sideband_get_event_buffer - return the event buffer for this device
+ * @sb: sideband instance for this usb device
+ *
+ * If a secondary xhci interrupter is set up for this usb device then this
+ * function returns the address of the event buffer where xHC writes
+ * the transfer completion events.
+ *
+ * Caller needs to free the returned sg_table
+ *
+ * Return: struct sg_table * if successful. NULL otherwise.
+ */
+struct sg_table *
+xhci_sideband_get_event_buffer(struct xhci_sideband *sb)
+{
+ if (!sb || !sb->ir)
+ return NULL;
+
+ return xhci_ring_to_sgtable(sb, sb->ir->event_ring);
+}
+EXPORT_SYMBOL_GPL(xhci_sideband_get_event_buffer);
+
+/**
+ * xhci_sideband_create_interrupter - creates a new interrupter for this sideband
+ * @sb: sideband instance for this usb device
+ * @num_seg: number of event ring segments to allocate
+ * @ip_autoclear: IP autoclearing support such as MSI implemented
+ *
+ * Sets up a xhci interrupter that can be used for this sideband accessed usb
+ * device. Transfer events for this device can be routed to this interrupters
+ * event ring by setting the 'Interrupter Target' field correctly when queueing
+ * the transfer TRBs.
+ * Once this interrupter is created the interrupter target ID can be obtained
+ * by calling xhci_sideband_interrupter_id()
+ *
+ * Returns 0 on success, negative error otherwise
+ */
+int
+xhci_sideband_create_interrupter(struct xhci_sideband *sb, int num_seg,
+ bool ip_autoclear, u32 imod_interval, int intr_num)
+{
+ int ret = 0;
+
+ if (!sb || !sb->xhci)
+ return -ENODEV;
+
+ mutex_lock(&sb->mutex);
+ if (sb->ir) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ sb->ir = xhci_create_secondary_interrupter(xhci_to_hcd(sb->xhci),
+ num_seg, imod_interval,
+ intr_num);
+ if (!sb->ir) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ sb->ir->ip_autoclear = ip_autoclear;
+
+out:
+ mutex_unlock(&sb->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(xhci_sideband_create_interrupter);
+
+/**
+ * xhci_sideband_remove_interrupter - remove the interrupter from a sideband
+ * @sb: sideband instance for this usb device
+ *
+ * Removes a registered interrupter from a sideband. This allows other
+ * sideband users to utilize the interrupter.
+ */
+void
+xhci_sideband_remove_interrupter(struct xhci_sideband *sb)
+{
+ if (!sb || !sb->ir)
+ return;
+
+ mutex_lock(&sb->mutex);
+ xhci_remove_secondary_interrupter(xhci_to_hcd(sb->xhci), sb->ir);
+
+ sb->ir = NULL;
+ mutex_unlock(&sb->mutex);
+}
+EXPORT_SYMBOL_GPL(xhci_sideband_remove_interrupter);
+
+/**
+ * xhci_sideband_interrupter_id - return the interrupter target id
+ * @sb: sideband instance for this usb device
+ *
+ * If a secondary xhci interrupter is set up for this usb device then this
+ * function returns the ID used by the interrupter. The sideband client
+ * needs to write this ID to the 'Interrupter Target' field of the transfer TRBs
+ * it queues on the endpoint's transfer ring to ensure transfer completion events
+ * are written by xHC to the correct interrupter event ring.
+ *
+ * Return: interrupter id on success, negative error otherwise
+ */
+int
+xhci_sideband_interrupter_id(struct xhci_sideband *sb)
+{
+ if (!sb || !sb->ir)
+ return -ENODEV;
+
+ return sb->ir->intr_num;
+}
+EXPORT_SYMBOL_GPL(xhci_sideband_interrupter_id);
+
+/**
+ * xhci_sideband_register - register a sideband for a usb device
+ * @intf: usb interface associated with the sideband device
+ * @type: sideband type; only XHCI_SIDEBAND_VENDOR is currently supported
+ * @notify_client: callback for notifying the client of sideband events, may be NULL
+ *
+ * Allows clients to utilize xHCI interrupters and fetch transfer and event
+ * ring parameters for executing data transfers.
+ *
+ * Return: pointer to a new xhci_sideband instance if successful. NULL otherwise.
+ */
+struct xhci_sideband *
+xhci_sideband_register(struct usb_interface *intf, enum xhci_sideband_type type,
+ int (*notify_client)(struct usb_interface *intf,
+ struct xhci_sideband_event *evt))
+{
+ struct usb_device *udev = interface_to_usbdev(intf);
+ struct usb_hcd *hcd = bus_to_hcd(udev->bus);
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct xhci_virt_device *vdev;
+ struct xhci_sideband *sb;
+
+ /*
+ * Make sure the usb device is connected to a xhci controller. Fail
+ * registration if the type is anything other than XHCI_SIDEBAND_VENDOR,
+ * as this is the only type that is currently supported by xhci-sideband.
+ */
+ if (!udev->slot_id || type != XHCI_SIDEBAND_VENDOR)
+ return NULL;
+
+ sb = kzalloc_node(sizeof(*sb), GFP_KERNEL, dev_to_node(hcd->self.sysdev));
+ if (!sb)
+ return NULL;
+
+ mutex_init(&sb->mutex);
+
+ /* check this device isn't already controlled via sideband */
+ spin_lock_irq(&xhci->lock);
+
+ vdev = xhci->devs[udev->slot_id];
+
+ if (!vdev || vdev->sideband) {
+ xhci_warn(xhci, "XHCI sideband for slot %d already in use\n",
+ udev->slot_id);
+ spin_unlock_irq(&xhci->lock);
+ kfree(sb);
+ return NULL;
+ }
+
+ sb->xhci = xhci;
+ sb->vdev = vdev;
+ sb->intf = intf;
+ sb->type = type;
+ sb->notify_client = notify_client;
+ vdev->sideband = sb;
+
+ spin_unlock_irq(&xhci->lock);
+
+ return sb;
+}
+EXPORT_SYMBOL_GPL(xhci_sideband_register);
+
+/**
+ * xhci_sideband_unregister - unregister sideband access to a usb device
+ * @sb: sideband instance to be unregistered
+ *
+ * Unregisters sideband access to a usb device and frees the sideband
+ * instance.
+ * After this the endpoint and interrupter event buffers should no longer
+ * be accessed via sideband. The xhci driver can now take over handling
+ * the buffers.
+ */
+void
+xhci_sideband_unregister(struct xhci_sideband *sb)
+{
+ struct xhci_hcd *xhci;
+ int i;
+
+ if (!sb)
+ return;
+
+ xhci = sb->xhci;
+
+ mutex_lock(&sb->mutex);
+ for (i = 0; i < EP_CTX_PER_DEV; i++)
+ if (sb->eps[i])
+ __xhci_sideband_remove_endpoint(sb, sb->eps[i]);
+ mutex_unlock(&sb->mutex);
+
+ xhci_sideband_remove_interrupter(sb);
+
+ spin_lock_irq(&xhci->lock);
+ sb->xhci = NULL;
+ sb->vdev->sideband = NULL;
+ spin_unlock_irq(&xhci->lock);
+
+ kfree(sb);
+}
+EXPORT_SYMBOL_GPL(xhci_sideband_unregister);
+MODULE_DESCRIPTION("xHCI sideband driver for secondary interrupter management");
+MODULE_LICENSE("GPL");
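As a usage sketch of the API added above (the client function name and parameter choices are illustrative, not from the patch): a class driver offloading one endpoint to a DSP would do roughly the following.

    #include <linux/usb/xhci-sideband.h>

    static int sketch_offload(struct usb_interface *intf,
    			  struct usb_host_endpoint *host_ep)
    {
    	struct xhci_sideband *sb;
    	struct sg_table *ring_sgt;
    	int ret;

    	sb = xhci_sideband_register(intf, XHCI_SIDEBAND_VENDOR, NULL);
    	if (!sb)
    		return -ENODEV;

    	/* one secondary interrupter: 2 event ring segments, ~1 ms IMOD,
    	 * no IP autoclear, intr_num 0 = any free interrupter */
    	ret = xhci_sideband_create_interrupter(sb, 2, false, 1000000, 0);
    	if (ret)
    		goto err_unregister;

    	ret = xhci_sideband_add_endpoint(sb, host_ep);
    	if (ret)
    		goto err_unregister;

    	ring_sgt = xhci_sideband_get_endpoint_buffer(sb, host_ep);
    	if (!ring_sgt) {
    		ret = -ENOMEM;
    		goto err_unregister;
    	}

    	/* hand xhci_sideband_interrupter_id(sb) and ring_sgt to the DSP;
    	 * the caller owns ring_sgt (sg_free_table() + kfree() when done) */
    	return 0;

    err_unregister:
    	xhci_sideband_unregister(sb);	/* also removes eps and interrupter */
    	return ret;
    }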
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index b5c362c2051d..0c7af44d4dae 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -1364,6 +1364,7 @@ static void tegra_xhci_id_work(struct work_struct *work)
tegra->otg_usb3_port = tegra_xusb_padctl_get_usb3_companion(tegra->padctl,
tegra->otg_usb2_port);
+ pm_runtime_get_sync(tegra->dev);
if (tegra->host_mode) {
/* switch to host mode */
if (tegra->otg_usb3_port >= 0) {
@@ -1393,6 +1394,7 @@ static void tegra_xhci_id_work(struct work_struct *work)
}
tegra_xhci_set_port_power(tegra, true, true);
+ pm_runtime_mark_last_busy(tegra->dev);
} else {
if (tegra->otg_usb3_port >= 0)
@@ -1400,6 +1402,7 @@ static void tegra_xhci_id_work(struct work_struct *work)
tegra_xhci_set_port_power(tegra, true, false);
}
+ pm_runtime_put_autosuspend(tegra->dev);
}
#if IS_ENABLED(CONFIG_PM) || IS_ENABLED(CONFIG_PM_SLEEP)
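The tegra hunk brackets the role-switch work with a runtime-PM reference so the controller cannot runtime-suspend mid-transition; the host-mode branch also refreshes the autosuspend timer before the final put. The general shape:

    #include <linux/pm_runtime.h>

    pm_runtime_get_sync(dev);		/* hold the controller awake */
    /* ... switch host/device role, set port power ... */
    pm_runtime_mark_last_busy(dev);	/* restart the autosuspend timer */
    pm_runtime_put_autosuspend(dev);	/* drop ref; may suspend after the delay */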
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 90eb491267b5..803047bec3e7 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -20,6 +20,7 @@
#include <linux/string_choices.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>
+#include <linux/usb/xhci-sideband.h>
#include "xhci.h"
#include "xhci-trace.h"
@@ -329,21 +330,29 @@ int xhci_enable_interrupter(struct xhci_interrupter *ir)
if (!ir || !ir->ir_set)
return -EINVAL;
- iman = readl(&ir->ir_set->irq_pending);
- writel(ER_IRQ_ENABLE(iman), &ir->ir_set->irq_pending);
+ iman = readl(&ir->ir_set->iman);
+ iman |= IMAN_IE;
+ writel(iman, &ir->ir_set->iman);
+ /* Read operation to guarantee the write has been flushed from posted buffers */
+ readl(&ir->ir_set->iman);
return 0;
}
-int xhci_disable_interrupter(struct xhci_interrupter *ir)
+int xhci_disable_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
{
u32 iman;
if (!ir || !ir->ir_set)
return -EINVAL;
- iman = readl(&ir->ir_set->irq_pending);
- writel(ER_IRQ_DISABLE(iman), &ir->ir_set->irq_pending);
+ iman = readl(&ir->ir_set->iman);
+ iman &= ~IMAN_IE;
+ writel(iman, &ir->ir_set->iman);
+
+ iman = readl(&ir->ir_set->iman);
+ if (iman & IMAN_IP)
+ xhci_dbg(xhci, "%s: Interrupt pending\n", __func__);
return 0;
}
@@ -354,13 +363,16 @@ int xhci_set_interrupter_moderation(struct xhci_interrupter *ir,
{
u32 imod;
- if (!ir || !ir->ir_set || imod_interval > U16_MAX * 250)
+ if (!ir || !ir->ir_set)
return -EINVAL;
- imod = readl(&ir->ir_set->irq_control);
- imod &= ~ER_IRQ_INTERVAL_MASK;
- imod |= (imod_interval / 250) & ER_IRQ_INTERVAL_MASK;
- writel(imod, &ir->ir_set->irq_control);
+ /* IMODI value in IMOD register is in 250ns increments */
+ imod_interval = umin(imod_interval / 250, IMODI_MASK);
+
+ imod = readl(&ir->ir_set->imod);
+ imod &= ~IMODI_MASK;
+ imod |= imod_interval;
+ writel(imod, &ir->ir_set->imod);
return 0;
}
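Behavioral note: xhci_set_interrupter_moderation() used to reject intervals above U16_MAX * 250 ns with -EINVAL; it now saturates them, which is why the caller in xhci_create_secondary_interrupter() could drop its error handling. A worked example:

    #include <linux/minmax.h>

    u32 imod_interval = 20 * 1000 * 1000;	/* request 20 ms, in ns */
    u32 imodi = umin(imod_interval / 250, IMODI_MASK);
    /* 80000 ticks saturates to IMODI_MASK (0xffff) => ~16.38 ms effective */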
@@ -460,6 +472,82 @@ static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
return (xhci->port_status_u0 == ((1 << xhci->usb3_rhub.num_ports) - 1));
}
+static void xhci_hcd_page_size(struct xhci_hcd *xhci)
+{
+ u32 page_size;
+
+ page_size = readl(&xhci->op_regs->page_size) & XHCI_PAGE_SIZE_MASK;
+ if (!is_power_of_2(page_size)) {
+ xhci_warn(xhci, "Invalid page size register = 0x%x\n", page_size);
+ /* Fallback to 4K page size, since that's common */
+ page_size = 1;
+ }
+
+ xhci->page_size = page_size << 12;
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "HCD page size set to %iK",
+ xhci->page_size >> 10);
+}
+
+static void xhci_enable_max_dev_slots(struct xhci_hcd *xhci)
+{
+ u32 config_reg;
+ u32 max_slots;
+
+ max_slots = HCS_MAX_SLOTS(xhci->hcs_params1);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xHC can handle at most %d device slots",
+ max_slots);
+
+ config_reg = readl(&xhci->op_regs->config_reg);
+ config_reg &= ~HCS_SLOTS_MASK;
+ config_reg |= max_slots;
+
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Setting Max device slots reg = 0x%x",
+ config_reg);
+ writel(config_reg, &xhci->op_regs->config_reg);
+}
+
+static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
+{
+ dma_addr_t deq_dma;
+ u64 crcr;
+
+ deq_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, xhci->cmd_ring->dequeue);
+ deq_dma &= CMD_RING_PTR_MASK;
+
+ crcr = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
+ crcr &= ~CMD_RING_PTR_MASK;
+ crcr |= deq_dma;
+
+ crcr &= ~CMD_RING_CYCLE;
+ crcr |= xhci->cmd_ring->cycle_state;
+
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Setting command ring address to 0x%llx", crcr);
+ xhci_write_64(xhci, crcr, &xhci->op_regs->cmd_ring);
+}
+
+static void xhci_set_doorbell_ptr(struct xhci_hcd *xhci)
+{
+ u32 offset;
+
+ offset = readl(&xhci->cap_regs->db_off) & DBOFF_MASK;
+ xhci->dba = (void __iomem *)xhci->cap_regs + offset;
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Doorbell array is located at offset 0x%x from cap regs base addr", offset);
+}
+
+/*
+ * Enable USB 3.0 device notifications for function remote wake, which is necessary
+ * for allowing USB 3.0 devices to do remote wakeup from U3 (device suspend).
+ */
+static void xhci_set_dev_notifications(struct xhci_hcd *xhci)
+{
+ u32 dev_notf;
+
+ dev_notf = readl(&xhci->op_regs->dev_notification);
+ dev_notf &= ~DEV_NOTE_MASK;
+ dev_notf |= DEV_NOTE_FWAKE;
+ writel(dev_notf, &xhci->op_regs->dev_notification);
+}
/*
* Initialize memory for HCD and xHC (one-time init).
@@ -473,11 +561,37 @@ static int xhci_init(struct usb_hcd *hcd)
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
int retval;
- xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Starting %s", __func__);
spin_lock_init(&xhci->lock);
+ INIT_LIST_HEAD(&xhci->cmd_list);
+ INIT_DELAYED_WORK(&xhci->cmd_timer, xhci_handle_command_timeout);
+ init_completion(&xhci->cmd_ring_stop_completion);
+ xhci_hcd_page_size(xhci);
+ memset(xhci->devs, 0, MAX_HC_SLOTS * sizeof(*xhci->devs));
+
retval = xhci_mem_init(xhci, GFP_KERNEL);
- xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init");
+ if (retval)
+ return retval;
+
+ /* Set the Number of Device Slots Enabled to the maximum supported value */
+ xhci_enable_max_dev_slots(xhci);
+
+ /* Set the address in the Command Ring Control register */
+ xhci_set_cmd_ring_deq(xhci);
+
+ /* Set Device Context Base Address Array pointer */
+ xhci_write_64(xhci, xhci->dcbaa->dma, &xhci->op_regs->dcbaa_ptr);
+
+ /* Set Doorbell array pointer */
+ xhci_set_doorbell_ptr(xhci);
+
+ /* Set USB 3.0 device notifications for function remote wake */
+ xhci_set_dev_notifications(xhci);
+
+ /* Initialize the Primary interrupter */
+ xhci_add_interrupter(xhci, 0);
+ xhci->interrupters[0]->isoc_bei_interval = AVOID_BEI_INTERVAL_MAX;
/* Initializing Compliance Mode Recovery Data If Needed */
if (xhci_compliance_mode_recovery_timer_quirk_check()) {
@@ -485,7 +599,8 @@ static int xhci_init(struct usb_hcd *hcd)
compliance_mode_recovery_timer_init(xhci);
}
- return retval;
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished %s", __func__);
+ return 0;
}
/*-------------------------------------------------------------------------*/
@@ -640,7 +755,7 @@ void xhci_stop(struct usb_hcd *hcd)
"// Disabling event ring interrupts");
temp = readl(&xhci->op_regs->status);
writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
- xhci_disable_interrupter(ir);
+ xhci_disable_interrupter(xhci, ir);
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
xhci_mem_cleanup(xhci);
@@ -719,8 +834,8 @@ static void xhci_save_registers(struct xhci_hcd *xhci)
ir->s3_erst_size = readl(&ir->ir_set->erst_size);
ir->s3_erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base);
ir->s3_erst_dequeue = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
- ir->s3_irq_pending = readl(&ir->ir_set->irq_pending);
- ir->s3_irq_control = readl(&ir->ir_set->irq_control);
+ ir->s3_iman = readl(&ir->ir_set->iman);
+ ir->s3_imod = readl(&ir->ir_set->imod);
}
}
@@ -743,28 +858,11 @@ static void xhci_restore_registers(struct xhci_hcd *xhci)
writel(ir->s3_erst_size, &ir->ir_set->erst_size);
xhci_write_64(xhci, ir->s3_erst_base, &ir->ir_set->erst_base);
xhci_write_64(xhci, ir->s3_erst_dequeue, &ir->ir_set->erst_dequeue);
- writel(ir->s3_irq_pending, &ir->ir_set->irq_pending);
- writel(ir->s3_irq_control, &ir->ir_set->irq_control);
+ writel(ir->s3_iman, &ir->ir_set->iman);
+ writel(ir->s3_imod, &ir->ir_set->imod);
}
}
-static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
-{
- u64 val_64;
-
- /* step 2: initialize command ring buffer */
- val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
- val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
- (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
- xhci->cmd_ring->dequeue) &
- (u64) ~CMD_RING_RSVD_BITS) |
- xhci->cmd_ring->cycle_state;
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "// Setting command ring address to 0x%llx",
- (long unsigned long) val_64);
- xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
-}
-
/*
* The whole command ring must be cleared to zero when we suspend the host.
*
@@ -1092,7 +1190,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool power_lost, bool is_auto_resume)
xhci_dbg(xhci, "// Disabling event ring interrupts\n");
temp = readl(&xhci->op_regs->status);
writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
- xhci_disable_interrupter(xhci->interrupters[0]);
+ xhci_disable_interrupter(xhci, xhci->interrupters[0]);
xhci_dbg(xhci, "cleaning up memory\n");
xhci_mem_cleanup(xhci);
@@ -1359,6 +1457,7 @@ static void xhci_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
* xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
* HCDs. Find the index for an endpoint given its descriptor. Use the return
* value to right shift 1 for the bitmask.
+ * @desc: USB endpoint descriptor to determine index for
*
* Index = (epnum * 2) + direction - 1,
* where direction = 0 for OUT, 1 for IN.
@@ -3089,6 +3188,42 @@ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
}
EXPORT_SYMBOL_GPL(xhci_reset_bandwidth);
+/* Get the available bandwidth of the ports under the xhci roothub */
+int xhci_get_port_bandwidth(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx,
+ u8 dev_speed)
+{
+ struct xhci_command *cmd;
+ unsigned long flags;
+ int ret;
+
+ if (!ctx || !xhci)
+ return -EINVAL;
+
+ cmd = xhci_alloc_command(xhci, true, GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->in_ctx = ctx;
+
+ /* get xhci port bandwidth, refer to xhci rev1_2 protocol 4.6.15 */
+ spin_lock_irqsave(&xhci->lock, flags);
+
+ ret = xhci_queue_get_port_bw(xhci, cmd, ctx->dma, dev_speed, 0);
+ if (ret) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ goto err_out;
+ }
+ xhci_ring_cmd_db(xhci);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+
+ wait_for_completion(cmd->completion);
+err_out:
+ kfree(cmd->completion);
+ kfree(cmd);
+
+ return ret;
+}
+
static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
struct xhci_container_ctx *in_ctx,
struct xhci_container_ctx *out_ctx,
@@ -3911,6 +4046,8 @@ static int xhci_discover_or_reset_device(struct usb_hcd *hcd,
}
if (ep->ring) {
+ if (ep->sideband)
+ xhci_sideband_notify_ep_ring_free(ep->sideband, i);
xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
xhci_free_endpoint_ring(xhci, virt_dev, i);
}
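xhci_get_port_bandwidth() above follows the driver's standard synchronous command pattern; condensed from the function itself:

    cmd = xhci_alloc_command(xhci, true, GFP_KERNEL);	/* true: with completion */
    cmd->in_ctx = ctx;

    spin_lock_irqsave(&xhci->lock, flags);
    ret = xhci_queue_get_port_bw(xhci, cmd, ctx->dma, dev_speed, 0);
    if (!ret)
    	xhci_ring_cmd_db(xhci);		/* kick the command ring doorbell */
    spin_unlock_irqrestore(&xhci->lock, flags);

    if (!ret)
    	wait_for_completion(cmd->completion);	/* event handler fills ctx->bytes */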
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 242ab9fbc8ae..49887a303e43 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -152,10 +152,6 @@ struct xhci_op_regs {
#define XHCI_RESET_LONG_USEC (10 * 1000 * 1000)
#define XHCI_RESET_SHORT_USEC (250 * 1000)
-/* IMAN - Interrupt Management Register */
-#define IMAN_IE (1 << 1)
-#define IMAN_IP (1 << 0)
-
/* USBSTS - USB status - status bitmasks */
/* HC not running - set to 1 when run/stop bit is cleared. */
#define STS_HALT XHCI_STS_HALT
@@ -184,23 +180,22 @@ struct xhci_op_regs {
* notification type that matches a bit set in this bit field.
*/
#define DEV_NOTE_MASK (0xffff)
-#define ENABLE_DEV_NOTE(x) (1 << (x))
/* Most of the device notification types should only be used for debug.
* SW does need to pay attention to function wake notifications.
*/
-#define DEV_NOTE_FWAKE ENABLE_DEV_NOTE(1)
+#define DEV_NOTE_FWAKE (1 << 1)
/* CRCR - Command Ring Control Register - cmd_ring bitmasks */
-/* bit 0 is the command ring cycle state */
+/* bit 0 - Cycle bit indicates the ownership of the command ring */
+#define CMD_RING_CYCLE (1 << 0)
/* stop ring operation after completion of the currently executing command */
#define CMD_RING_PAUSE (1 << 1)
/* stop ring immediately - abort the currently executing command */
#define CMD_RING_ABORT (1 << 2)
/* true: command ring is running */
#define CMD_RING_RUNNING (1 << 3)
-/* bits 4:5 reserved and should be preserved */
-/* Command Ring pointer - bit mask for the lower 32 bits. */
-#define CMD_RING_RSVD_BITS (0x3f)
+/* bits 63:6 - Command Ring pointer */
+#define CMD_RING_PTR_MASK GENMASK_ULL(63, 6)
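/*
 * Hedged sketch (not part of this diff): re-arming CRCR with the reworked
 * masks above. The 64-byte aligned ring address goes in bits 63:6 and the
 * consumer cycle state in bit 0; this mirrors how the core restarts the
 * command ring, though the helper name is ours.
 */
static void crcr_sketch(struct xhci_hcd *xhci, dma_addr_t deq)
{
	u64 crcr = deq & CMD_RING_PTR_MASK;

	if (xhci->cmd_ring->cycle_state)
		crcr |= CMD_RING_CYCLE;
	xhci_write_64(xhci, crcr, &xhci->op_regs->cmd_ring);
}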
/* CONFIG - Configure Register - config_reg bitmasks */
/* bits 0:7 - maximum number of device slots enabled (NumSlotsEn) */
@@ -215,14 +210,13 @@ struct xhci_op_regs {
#define XHCI_PAGE_SIZE_MASK 0xffff
/**
- * struct xhci_intr_reg - Interrupt Register Set
- * @irq_pending: IMAN - Interrupt Management Register. Used to enable
+ * struct xhci_intr_reg - Interrupt Register Set, xHCI rev1.2 section 5.5.2.
+ * @iman: IMAN - Interrupt Management Register. Used to enable
* interrupts and check for pending interrupts.
- * @irq_control: IMOD - Interrupt Moderation Register.
- * Used to throttle interrupts.
- * @erst_size: Number of segments in the Event Ring Segment Table (ERST).
- * @erst_base: ERST base address.
- * @erst_dequeue: Event ring dequeue pointer.
+ * @imod: IMOD - Interrupt Moderation Register. Used to throttle interrupts.
+ * @erst_size: ERSTSZ - Number of segments in the Event Ring Segment Table (ERST).
+ * @erst_base: ERSTBA - Event ring segment table base address.
+ * @erst_dequeue: ERDP - Event ring dequeue pointer.
*
* Each interrupter (defined by a MSI-X vector) has an event ring and an Event
* Ring Segment Table (ERST) associated with it. The event ring is comprised of
@@ -232,48 +226,51 @@ struct xhci_op_regs {
* updates the dequeue pointer.
*/
struct xhci_intr_reg {
- __le32 irq_pending;
- __le32 irq_control;
+ __le32 iman;
+ __le32 imod;
__le32 erst_size;
__le32 rsvd;
__le64 erst_base;
__le64 erst_dequeue;
};
-/* irq_pending bitmasks */
-#define ER_IRQ_PENDING(p) ((p) & 0x1)
-/* bits 2:31 need to be preserved */
-/* THIS IS BUGGY - FIXME - IP IS WRITE 1 TO CLEAR */
-#define ER_IRQ_CLEAR(p) ((p) & 0xfffffffe)
-#define ER_IRQ_ENABLE(p) ((ER_IRQ_CLEAR(p)) | 0x2)
-#define ER_IRQ_DISABLE(p) ((ER_IRQ_CLEAR(p)) & ~(0x2))
-
-/* irq_control bitmasks */
-/* Minimum interval between interrupts (in 250ns intervals). The interval
- * between interrupts will be longer if there are no events on the event ring.
- * Default is 4000 (1 ms).
+/* iman bitmasks */
+/* bit 0 - Interrupt Pending (IP), whether there is an interrupt pending. Write-1-to-clear. */
+#define IMAN_IP (1 << 0)
+/* bit 1 - Interrupt Enable (IE), whether the interrupter is capable of generating an interrupt */
+#define IMAN_IE (1 << 1)
+
+/* imod bitmasks */
+/*
+ * bits 15:0 - Interrupt Moderation Interval, the minimum interval between interrupts
+ * (in 250ns intervals). The interval between interrupts will be longer if there are no
+ * events on the event ring. Default is 4000 (1 ms).
*/
-#define ER_IRQ_INTERVAL_MASK (0xffff)
-/* Counter used to count down the time to the next interrupt - HW use only */
-#define ER_IRQ_COUNTER_MASK (0xffff << 16)
+#define IMODI_MASK (0xffff)
+/* bits 31:16 - Interrupt Moderation Counter, used to count down the time to the next interrupt */
+#define IMODC_MASK (0xffff << 16)
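/*
 * Hedged sketch (not part of this diff): the two register fields above in
 * use. Mirrors the core's enable/moderation pattern with the renamed
 * iman/imod fields; the helper name is ours.
 */
static void intr_setup_sketch(struct xhci_interrupter *ir, u32 interval)
{
	u32 iman, imod;

	/* IP is write-1-to-clear, so writing back a pending bit also acks it */
	iman = readl(&ir->ir_set->iman);
	writel(iman | IMAN_IE, &ir->ir_set->iman);

	/* moderation interval in 250ns units; 4000 is the default 1 ms */
	imod = readl(&ir->ir_set->imod);
	imod &= ~IMODI_MASK;	/* IMODC is a hw countdown, leave it alone */
	imod |= interval & IMODI_MASK;
	writel(imod, &ir->ir_set->imod);
}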
/* erst_size bitmasks */
-/* Preserve bits 16:31 of erst_size */
-#define ERST_SIZE_MASK (0xffff << 16)
+/* bits 15:0 - Event Ring Segment Table Size, number of ERST entries */
+#define ERST_SIZE_MASK (0xffff)
/* erst_base bitmasks */
-#define ERST_BASE_RSVDP (GENMASK_ULL(5, 0))
+/* bits 63:6 - Event Ring Segment Table Base Address Register */
+#define ERST_BASE_ADDRESS_MASK GENMASK_ULL(63, 6)
/* erst_dequeue bitmasks */
-/* Dequeue ERST Segment Index (DESI) - Segment number (or alias)
- * where the current dequeue pointer lies. This is an optional HW hint.
+/*
+ * bits 2:0 - Dequeue ERST Segment Index (DESI), is the segment number (or alias) where the
+ * current dequeue pointer lies. This is an optional HW hint.
*/
#define ERST_DESI_MASK (0x7)
-/* Event Handler Busy (EHB) - is the event ring scheduled to be serviced by
+/*
+ * bit 3 - Event Handler Busy (EHB), whether the event ring is scheduled to be serviced by
 * a work queue (or delayed service routine)
*/
#define ERST_EHB (1 << 3)
-#define ERST_PTR_MASK (GENMASK_ULL(63, 4))
+/* bits 63:4 - Event Ring Dequeue Pointer */
+#define ERST_PTR_MASK GENMASK_ULL(63, 4)
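/*
 * Hedged sketch (not part of this diff): advancing the event ring dequeue
 * pointer with the masks above, in the spirit of xhci_update_erst_dequeue().
 * DESI in bits 2:0 is preserved, the new TRB address lands in bits 63:4,
 * and EHB is write-1-to-clear. The helper name is ours.
 */
static void erdp_sketch(struct xhci_hcd *xhci, struct xhci_interrupter *ir,
			dma_addr_t new_deq, bool clear_ehb)
{
	u64 erdp = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);

	erdp &= ERST_DESI_MASK;			/* keep the segment index hint */
	erdp |= new_deq & ERST_PTR_MASK;	/* swap in the new dequeue TRB */
	if (clear_ehb)
		erdp |= ERST_EHB;		/* ack the event handler busy flag */
	xhci_write_64(xhci, erdp, &ir->ir_set->erst_dequeue);
}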
/**
* struct xhci_run_regs
@@ -589,6 +586,7 @@ struct xhci_stream_info {
#define SMALL_STREAM_ARRAY_SIZE 256
#define MEDIUM_STREAM_ARRAY_SIZE 1024
+#define GET_PORT_BW_ARRAY_SIZE 256
/* Some Intel xHCI host controllers need software to keep track of the bus
* bandwidth. Keep track of endpoint info here. Each root port is allocated
@@ -700,6 +698,8 @@ struct xhci_virt_ep {
int next_frame_id;
/* Use new Isoch TRB layout needed for extended TBC support */
bool use_extended_tbc;
+ /* set if this endpoint is controlled via sideband access */
+ struct xhci_sideband *sideband;
};
enum xhci_overhead_type {
@@ -762,6 +762,8 @@ struct xhci_virt_device {
u16 current_mel;
/* Used for the debugfs interfaces. */
void *debugfs_private;
+ /* set if this device is controlled via sideband access */
+ struct xhci_sideband *sideband;
};
/*
@@ -1002,6 +1004,9 @@ enum xhci_setup_dev {
/* bits 16:23 are the virtual function ID */
/* bits 24:31 are the slot ID */
+/* bits 16:19 are the dev speed */
+#define DEV_SPEED_FOR_TRB(p) ((p) << 16)
+
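/*
 * Hedged sketch (not part of this diff): composing the control word of a
 * Get Port Bandwidth command TRB with the new macro, roughly what
 * xhci_queue_get_port_bw() passes to queue_command(). The helper is ours;
 * the cycle bit is filled in by the queuing code.
 */
static u32 get_port_bw_trb_sketch(u8 dev_speed)
{
	return TRB_TYPE(TRB_GET_BW) | DEV_SPEED_FOR_TRB(dev_speed);
}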
/* Stop Endpoint TRB - ep_index to endpoint ID for this TRB */
#define SUSPEND_PORT_FOR_TRB(p) (((p) & 1) << 23)
#define TRB_TO_SUSPEND_PORT(p) (((p) & (1 << 23)) >> 23)
@@ -1447,8 +1452,8 @@ struct xhci_interrupter {
bool ip_autoclear;
u32 isoc_bei_interval;
/* For interrupter registers save and restore over suspend/resume */
- u32 s3_irq_pending;
- u32 s3_irq_control;
+ u32 s3_iman;
+ u32 s3_imod;
u32 s3_erst_size;
u64 s3_erst_base;
u64 s3_erst_dequeue;
@@ -1554,6 +1559,7 @@ struct xhci_hcd {
struct dma_pool *device_pool;
struct dma_pool *segment_pool;
struct dma_pool *small_streams_pool;
+ struct dma_pool *port_bw_pool;
struct dma_pool *medium_streams_pool;
/* Host controller watchdog timer structures */
@@ -1846,11 +1852,18 @@ struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
int type, gfp_t flags);
void xhci_free_container_ctx(struct xhci_hcd *xhci,
struct xhci_container_ctx *ctx);
+struct xhci_container_ctx *xhci_alloc_port_bw_ctx(struct xhci_hcd *xhci,
+ gfp_t flags);
+void xhci_free_port_bw_ctx(struct xhci_hcd *xhci,
+ struct xhci_container_ctx *ctx);
struct xhci_interrupter *
xhci_create_secondary_interrupter(struct usb_hcd *hcd, unsigned int segs,
- u32 imod_interval);
+ u32 imod_interval, unsigned int intr_num);
void xhci_remove_secondary_interrupter(struct usb_hcd *hcd, struct xhci_interrupter *ir);
+void xhci_skip_sec_intr_events(struct xhci_hcd *xhci,
+ struct xhci_ring *ring,
+ struct xhci_interrupter *ir);
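/*
 * Hedged usage sketch (not part of this diff): a sideband client creating a
 * secondary interrupter with the extended signature above. We assume
 * intr_num == 0 asks for any free interrupter slot while a nonzero value
 * requests that specific one; the wrapper name and values are ours.
 */
static struct xhci_interrupter *sec_intr_sketch(struct usb_hcd *hcd)
{
	/* 2 event ring segments, default 1 ms moderation (4000 * 250ns) */
	return xhci_create_secondary_interrupter(hcd, 2, 4000, 0);
}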
/* xHCI host controller glue */
typedef void (*xhci_get_quirks_t)(struct device *, struct xhci_hcd *);
@@ -1891,7 +1904,7 @@ int xhci_alloc_tt_info(struct xhci_hcd *xhci,
int xhci_set_interrupter_moderation(struct xhci_interrupter *ir,
u32 imod_interval);
int xhci_enable_interrupter(struct xhci_interrupter *ir);
-int xhci_disable_interrupter(struct xhci_interrupter *ir);
+int xhci_disable_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir);
/* xHCI ring, segment, TRB, and TD functions */
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb);
@@ -1916,6 +1929,11 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
int xhci_queue_configure_endpoint(struct xhci_hcd *xhci,
struct xhci_command *cmd, dma_addr_t in_ctx_ptr, u32 slot_id,
bool command_must_succeed);
+int xhci_queue_get_port_bw(struct xhci_hcd *xhci,
+ struct xhci_command *cmd, dma_addr_t in_ctx_ptr,
+ u8 dev_speed, bool command_must_succeed);
+int xhci_get_port_bandwidth(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx,
+ u8 dev_speed);
int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd,
dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed);
int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
@@ -1936,6 +1954,10 @@ unsigned int count_trbs(u64 addr, u64 len);
int xhci_stop_endpoint_sync(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
int suspend, gfp_t gfp_flags);
void xhci_process_cancelled_tds(struct xhci_virt_ep *ep);
+void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
+ struct xhci_interrupter *ir,
+ bool clear_ehb);
+void xhci_add_interrupter(struct xhci_hcd *xhci, unsigned int intr_num);
/* xHCI roothub code */
void xhci_set_link_state(struct xhci_hcd *xhci, struct xhci_port *port,