mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2024-12-28 16:52:18 +00:00
thunderbolt: Changes for v6.9 merge window
This includes following USB4/Thunderbolt changes for the v6.9 merge
window:

  - Reset the topology also for USB4 v1 routers on driver load
  - DisplayPort tunneling and bandwidth allocation mode improvements
  - Tracepoint support for the control channel
  - Couple of minor fixes and cleanups.

All these have been in linux-next with no reported issues.

-----BEGIN PGP SIGNATURE-----

iQJUBAABCgA+FiEEVTdhRGBbNzLrSUBaAP2fSd+ZWKAFAmXgU5ogHG1pa2Eud2Vz
dGVyYmVyZ0BsaW51eC5pbnRlbC5jb20ACgkQAP2fSd+ZWKCD/g/+O9AmkgQwyUr8
EoDHjFBRTomGMh/eR2g2JfEIpAj0hqRYDHxbSC5ZyWZWIaMuj7RiBBkU3aS+PioD
KAWm46NB80Un83fO2gGV+t0cCfcTqxdU2gtsK9TEAydkKctJ+HK+htMvJUaorJV0
35uPjCwuAWPrb0MINNysKbRy/QLrCU0+EF6Zl/1Kdg2M7DPNT15He4UTbwgFuIhp
aEyRz6221yA6FJB9oV7W7ajSnA7v4o8RmyAHN2yYHuHwAsvRaFFvuB2PMQLuA0G1
+HQLoZE/mse/2hwsGIk01peHkJd5nkntsOP4DvOAig7QV8A8INP29nh+5lGN+m0C
UN8TuFjpIMe3tfr5H8dosGoJH8o3+h22F2OdcGai0r5oCtAufv8OzOWNkEJACXcU
372raqDW7SXpSx0UdkHbJivOBG1xAMkqZxqV18SI5uDMEcxAw1kVp3J7+KDRfJyk
p9mgElW7qj20LQ7dF1kyWVu7TO12xRSGlbIHbGnXCDhMS3ZhKVwrOAXxnRGf0XBc
IHcPlQUTzjlc0n7MSzVQdFaeBLgtrqk23SGMPgxfgqPigtJCpi8E0E1d5SMyAiqE
YrnTkf+RY5vrfv8denKSWr0LTFFqdGq5asjoFhEWffxbMvaxi1y8v32haT+ddUFi
UYIV3yz419c60aiuEfswSg0J0TpvF44=
=9hHe
-----END PGP SIGNATURE-----

Merge tag 'thunderbolt-for-v6.9-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt into usb-next

Mika writes:

thunderbolt: Changes for v6.9 merge window

This includes following USB4/Thunderbolt changes for the v6.9 merge
window:

  - Reset the topology also for USB4 v1 routers on driver load
  - DisplayPort tunneling and bandwidth allocation mode improvements
  - Tracepoint support for the control channel
  - Couple of minor fixes and cleanups.

All these have been in linux-next with no reported issues.

* tag 'thunderbolt-for-v6.9-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt: (23 commits)
  thunderbolt: Constify the struct device_type usage
  thunderbolt: Add trace events support for the control channel
  thunderbolt: Keep the domain powered when USB4 port is in redrive mode
  thunderbolt: Improve DisplayPort tunnel setup process to be more robust
  thunderbolt: Calculate DisplayPort tunnel bandwidth after DPRX capabilities read
  thunderbolt: Reserve released DisplayPort bandwidth for a group for 10 seconds
  thunderbolt: Introduce tb_tunnel_direction_downstream()
  thunderbolt: Re-order bandwidth group functions
  thunderbolt: Fail the failed bandwidth request properly
  thunderbolt: Log an error if DPTX request is not cleared
  thunderbolt: Handle bandwidth allocation mode disable request
  thunderbolt: Re-calculate estimated bandwidth when allocation mode is enabled
  thunderbolt: Use DP_LOCAL_CAP for maximum bandwidth calculation
  thunderbolt: Correct typo in host_reset parameter
  thunderbolt: Skip discovery also in USB4 v2 host
  thunderbolt: Reset only non-USB4 host routers in resume
  thunderbolt: Remove usage of the deprecated ida_simple_xx() API
  thunderbolt: Fix rollback in tb_port_lane_bonding_enable() for lane 1
  thunderbolt: Fix XDomain rx_lanes_show and tx_lanes_show
  thunderbolt: Reset topology created by the boot firmware
  ...
This commit is contained in: commit 73473b3033
drivers/thunderbolt/Makefile
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0-only
+ccflags-y := -I$(src)
 obj-${CONFIG_USB4} := thunderbolt.o
 thunderbolt-objs := nhi.o nhi_ops.o ctl.o tb.o switch.o cap.o path.o tunnel.o eeprom.o
 thunderbolt-objs += domain.o dma_port.o icm.o property.o xdomain.o lc.o tmu.o usb4.o
drivers/thunderbolt/ctl.c
@@ -15,6 +15,8 @@
 #include "ctl.h"
 
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
 #define TB_CTL_RX_PKG_COUNT	10
 #define TB_CTL_RETRIES		4
@@ -32,6 +34,7 @@
  * @timeout_msec: Default timeout for non-raw control messages
  * @callback: Callback called when hotplug message is received
  * @callback_data: Data passed to @callback
+ * @index: Domain number. This will be output with the trace record.
  */
 struct tb_ctl {
 	struct tb_nhi *nhi;
@@ -47,6 +50,8 @@ struct tb_ctl {
 	int timeout_msec;
 	event_cb callback;
 	void *callback_data;
+
+	int index;
 };
 
@@ -369,6 +374,9 @@ static int tb_ctl_tx(struct tb_ctl *ctl, const void *data, size_t len,
 	pkg->frame.size = len + 4;
 	pkg->frame.sof = type;
 	pkg->frame.eof = type;
+
+	trace_tb_tx(ctl->index, type, data, len);
+
 	cpu_to_be32_array(pkg->buffer, data, len / 4);
 	*(__be32 *) (pkg->buffer + len) = tb_crc(pkg->buffer, len);
 
@@ -384,6 +392,7 @@ static int tb_ctl_tx(struct tb_ctl *ctl, const void *data, size_t len,
 static bool tb_ctl_handle_event(struct tb_ctl *ctl, enum tb_cfg_pkg_type type,
 				struct ctl_pkg *pkg, size_t size)
 {
+	trace_tb_event(ctl->index, type, pkg->buffer, size);
 	return ctl->callback(ctl->callback_data, type, pkg->buffer, size);
 }
 
@@ -489,6 +498,9 @@ static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
 	 * triggered from messing with the active requests.
	 */
 	req = tb_cfg_request_find(pkg->ctl, pkg);
+
+	trace_tb_rx(pkg->ctl->index, frame->eof, pkg->buffer, frame->size, !req);
+
 	if (req) {
 		if (req->copy(req, pkg))
 			schedule_work(&req->work);
@@ -614,6 +626,7 @@ struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl,
 /**
  * tb_ctl_alloc() - allocate a control channel
  * @nhi: Pointer to NHI
+ * @index: Domain number
  * @timeout_msec: Default timeout used with non-raw control messages
  * @cb: Callback called for plug events
  * @cb_data: Data passed to @cb
@@ -622,14 +635,16 @@ struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl,
  *
  * Return: Returns a pointer on success or NULL on failure.
  */
-struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, int timeout_msec, event_cb cb,
-			    void *cb_data)
+struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, int index, int timeout_msec,
+			    event_cb cb, void *cb_data)
 {
 	int i;
 	struct tb_ctl *ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
 	if (!ctl)
 		return NULL;
 
 	ctl->nhi = nhi;
+	ctl->index = index;
 	ctl->timeout_msec = timeout_msec;
 	ctl->callback = cb;
 	ctl->callback_data = cb_data;
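The CREATE_TRACE_POINTS block above follows the standard ftrace convention: exactly one translation unit in the subsystem defines the macro before including the trace header, which expands the event declarations into actual tracepoint definitions. A minimal sketch of the pattern (generic ftrace usage, not part of this patch):

	/* In exactly one .c file per subsystem (here ctl.c): */
	#define CREATE_TRACE_POINTS
	#include "trace.h"	/* emits the tracepoint definitions */

	/*
	 * Any other file calling trace_tb_tx() and friends includes the
	 * header without the define and gets declarations only:
	 */
	#include "trace.h"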
drivers/thunderbolt/ctl.h
@@ -21,8 +21,8 @@ struct tb_ctl;
 typedef bool (*event_cb)(void *data, enum tb_cfg_pkg_type type,
 			 const void *buf, size_t size);
 
-struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, int timeout_msec, event_cb cb,
-			    void *cb_data);
+struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, int index, int timeout_msec,
+			    event_cb cb, void *cb_data);
 void tb_ctl_start(struct tb_ctl *ctl);
 void tb_ctl_stop(struct tb_ctl *ctl);
 void tb_ctl_free(struct tb_ctl *ctl);
drivers/thunderbolt/domain.c
@@ -321,12 +321,12 @@ static void tb_domain_release(struct device *dev)
 
 	tb_ctl_free(tb->ctl);
 	destroy_workqueue(tb->wq);
-	ida_simple_remove(&tb_domain_ida, tb->index);
+	ida_free(&tb_domain_ida, tb->index);
 	mutex_destroy(&tb->lock);
 	kfree(tb);
 }
 
-struct device_type tb_domain_type = {
+const struct device_type tb_domain_type = {
 	.name = "thunderbolt_domain",
 	.release = tb_domain_release,
 };
@@ -389,7 +389,7 @@ struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize
 	tb->nhi = nhi;
 	mutex_init(&tb->lock);
 
-	tb->index = ida_simple_get(&tb_domain_ida, 0, 0, GFP_KERNEL);
+	tb->index = ida_alloc(&tb_domain_ida, GFP_KERNEL);
 	if (tb->index < 0)
 		goto err_free;
 
@@ -397,7 +397,7 @@ struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize
 	if (!tb->wq)
 		goto err_remove_ida;
 
-	tb->ctl = tb_ctl_alloc(nhi, timeout_msec, tb_domain_event_cb, tb);
+	tb->ctl = tb_ctl_alloc(nhi, tb->index, timeout_msec, tb_domain_event_cb, tb);
 	if (!tb->ctl)
 		goto err_destroy_wq;
 
@@ -413,7 +413,7 @@ struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize
 err_destroy_wq:
 	destroy_workqueue(tb->wq);
 err_remove_ida:
-	ida_simple_remove(&tb_domain_ida, tb->index);
+	ida_free(&tb_domain_ida, tb->index);
 err_free:
 	kfree(tb);
 
@@ -423,6 +423,7 @@ struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize
 /**
  * tb_domain_add() - Add domain to the system
  * @tb: Domain to add
+ * @reset: Issue reset to the host router
  *
 * Starts the domain and adds it to the system. Hotplugging devices will
 * work after this has been returned successfully. In order to remove
@@ -431,7 +432,7 @@ struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize
 *
 * Return: %0 in case of success and negative errno in case of error
 */
-int tb_domain_add(struct tb *tb)
+int tb_domain_add(struct tb *tb, bool reset)
 {
 	int ret;
 
@@ -460,7 +461,7 @@ int tb_domain_add(struct tb *tb)
 
 	/* Start the domain */
 	if (tb->cm_ops->start) {
-		ret = tb->cm_ops->start(tb);
+		ret = tb->cm_ops->start(tb, reset);
 		if (ret)
 			goto err_domain_del;
 	}
@@ -505,6 +506,10 @@ void tb_domain_remove(struct tb *tb)
 	mutex_unlock(&tb->lock);
 
 	flush_workqueue(tb->wq);
+
+	if (tb->cm_ops->deinit)
+		tb->cm_ops->deinit(tb);
+
 	device_unregister(&tb->dev);
 }
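The ida_simple_*() conversions in this hunk (and in the nhi.c, nvm.c, switch.c and xdomain.c hunks below) follow the documented replacement mapping for the deprecated API. A standalone sketch of the equivalences, not taken from the patch:

	#include <linux/idr.h>

	static DEFINE_IDA(example_ida);	/* hypothetical IDA, for illustration */

	static int example(void)
	{
		/* ida_simple_get(&ida, 0, 0, GFP_KERNEL) becomes: */
		int id = ida_alloc(&example_ida, GFP_KERNEL);

		if (id < 0)
			return id;
		/*
		 * ida_simple_get() treated its 'end' argument as exclusive,
		 * while ida_alloc_max()/ida_alloc_range() take inclusive
		 * bounds -- hence MSIX_MAX_VECS - 1 in the nhi.c hunk and
		 * the dropped "+ 1" in the switch.c HopID hunk below.
		 */
		ida_free(&example_ida, id);	/* was ida_simple_remove() */
		return 0;
	}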
drivers/thunderbolt/icm.c
@@ -2144,7 +2144,7 @@ static int icm_runtime_resume(struct tb *tb)
 	return 0;
 }
 
-static int icm_start(struct tb *tb)
+static int icm_start(struct tb *tb, bool not_used)
 {
 	struct icm *icm = tb_priv(tb);
 	int ret;
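Note that the firmware-based connection manager has no use for the new reset flag; icm_start() takes it only to match the updated tb_cm_ops::start signature, hence the not_used parameter name.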
drivers/thunderbolt/lc.c
@@ -6,6 +6,8 @@
  * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
  */
 
+#include <linux/delay.h>
+
 #include "tb.h"
 
 /**
@@ -45,6 +47,49 @@ static int find_port_lc_cap(struct tb_port *port)
 	return sw->cap_lc + start + phys * size;
 }
 
+/**
+ * tb_lc_reset_port() - Trigger downstream port reset through LC
+ * @port: Port that is reset
+ *
+ * Triggers downstream port reset through link controller registers.
+ * Returns %0 in case of success negative errno otherwise. Only supports
+ * non-USB4 routers with link controller (that's Thunderbolt 2 and
+ * Thunderbolt 3).
+ */
+int tb_lc_reset_port(struct tb_port *port)
+{
+	struct tb_switch *sw = port->sw;
+	int cap, ret;
+	u32 mode;
+
+	if (sw->generation < 2)
+		return -EINVAL;
+
+	cap = find_port_lc_cap(port);
+	if (cap < 0)
+		return cap;
+
+	ret = tb_sw_read(sw, &mode, TB_CFG_SWITCH, cap + TB_LC_PORT_MODE, 1);
+	if (ret)
+		return ret;
+
+	mode |= TB_LC_PORT_MODE_DPR;
+
+	ret = tb_sw_write(sw, &mode, TB_CFG_SWITCH, cap + TB_LC_PORT_MODE, 1);
+	if (ret)
+		return ret;
+
+	fsleep(10000);
+
+	ret = tb_sw_read(sw, &mode, TB_CFG_SWITCH, cap + TB_LC_PORT_MODE, 1);
+	if (ret)
+		return ret;
+
+	mode &= ~TB_LC_PORT_MODE_DPR;
+
+	return tb_sw_write(sw, &mode, TB_CFG_SWITCH, cap + TB_LC_PORT_MODE, 1);
+}
+
 static int tb_lc_set_port_configured(struct tb_port *port, bool configured)
 {
 	bool upstream = tb_is_upstream_port(port);
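The reset here is a pulse: TB_LC_PORT_MODE_DPR is set, held for fsleep(10000) (10 ms), then cleared again. usb4_port_reset() later in the series performs the same assert/hold/deassert sequence through PORT_CS_19 for USB4 ports.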
drivers/thunderbolt/nhi.c
@@ -48,7 +48,7 @@
 
 static bool host_reset = true;
 module_param(host_reset, bool, 0444);
-MODULE_PARM_DESC(host_reset, "reset USBv2 host router (default: true)");
+MODULE_PARM_DESC(host_reset, "reset USB4 host router (default: true)");
 
 static int ring_interrupt_index(const struct tb_ring *ring)
 {
@@ -465,7 +465,7 @@ static int ring_request_msix(struct tb_ring *ring, bool no_suspend)
 	if (!nhi->pdev->msix_enabled)
 		return 0;
 
-	ret = ida_simple_get(&nhi->msix_ida, 0, MSIX_MAX_VECS, GFP_KERNEL);
+	ret = ida_alloc_max(&nhi->msix_ida, MSIX_MAX_VECS - 1, GFP_KERNEL);
 	if (ret < 0)
 		return ret;
 
@@ -485,7 +485,7 @@ static int ring_request_msix(struct tb_ring *ring, bool no_suspend)
 	return 0;
 
 err_ida_remove:
-	ida_simple_remove(&nhi->msix_ida, ring->vector);
+	ida_free(&nhi->msix_ida, ring->vector);
 
 	return ret;
 }
@@ -496,7 +496,7 @@ static void ring_release_msix(struct tb_ring *ring)
 		return;
 
 	free_irq(ring->irq, ring);
-	ida_simple_remove(&ring->nhi->msix_ida, ring->vector);
+	ida_free(&ring->nhi->msix_ida, ring->vector);
 	ring->vector = 0;
 	ring->irq = 0;
 }
@@ -1364,7 +1364,6 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	nhi_check_quirks(nhi);
 	nhi_check_iommu(nhi);
-
 	nhi_reset(nhi);
 
 	res = nhi_init_msi(nhi);
@@ -1392,7 +1391,7 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	dev_dbg(dev, "NHI initialized, starting thunderbolt\n");
 
-	res = tb_domain_add(tb);
+	res = tb_domain_add(tb, host_reset);
 	if (res) {
 		/*
 		 * At this point the RX/TX rings might already have been
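Since host_reset defaults to true, tb_domain_add() now resets the host router on every probe. The reset can still be skipped at load time with the usual module parameter syntax, e.g. booting with thunderbolt.host_reset=0 or loading with modprobe thunderbolt host_reset=0; the 0444 permissions make the parameter read-only at runtime.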
drivers/thunderbolt/nvm.c
@@ -330,7 +330,7 @@ struct tb_nvm *tb_nvm_alloc(struct device *dev)
 	if (!nvm)
 		return ERR_PTR(-ENOMEM);
 
-	ret = ida_simple_get(&nvm_ida, 0, 0, GFP_KERNEL);
+	ret = ida_alloc(&nvm_ida, GFP_KERNEL);
 	if (ret < 0) {
 		kfree(nvm);
 		return ERR_PTR(ret);
@@ -528,7 +528,7 @@ void tb_nvm_free(struct tb_nvm *nvm)
 		nvmem_unregister(nvm->non_active);
 		nvmem_unregister(nvm->active);
 		vfree(nvm->buf);
-		ida_simple_remove(&nvm_ida, nvm->id);
+		ida_free(&nvm_ida, nvm->id);
 	}
 	kfree(nvm);
 }
drivers/thunderbolt/path.c
@@ -446,6 +446,19 @@ static int __tb_path_deactivate_hop(struct tb_port *port, int hop_index,
 	return -ETIMEDOUT;
 }
 
+/**
+ * tb_path_deactivate_hop() - Deactivate one path in path config space
+ * @port: Lane or protocol adapter
+ * @hop_index: HopID of the path to be cleared
+ *
+ * This deactivates or clears a single path config space entry at
+ * @hop_index. Returns %0 in success and negative errno otherwise.
+ */
+int tb_path_deactivate_hop(struct tb_port *port, int hop_index)
+{
+	return __tb_path_deactivate_hop(port, hop_index, true);
+}
+
 static void __tb_path_deactivate_hops(struct tb_path *path, int first_hop)
 {
 	int i, res;
drivers/thunderbolt/quirks.c
@@ -43,6 +43,12 @@ static void quirk_usb3_maximum_bandwidth(struct tb_switch *sw)
 	}
 }
 
+static void quirk_block_rpm_in_redrive(struct tb_switch *sw)
+{
+	sw->quirks |= QUIRK_KEEP_POWER_IN_DP_REDRIVE;
+	tb_sw_dbg(sw, "preventing runtime PM in DP redrive mode\n");
+}
+
 struct tb_quirk {
 	u16 hw_vendor_id;
 	u16 hw_device_id;
@@ -86,6 +92,14 @@ static const struct tb_quirk tb_quirks[] = {
 		  quirk_usb3_maximum_bandwidth },
 	{ 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HUB_40G_BRIDGE, 0x0000, 0x0000,
 		  quirk_usb3_maximum_bandwidth },
+	/*
+	 * Block Runtime PM in DP redrive mode for Intel Barlow Ridge host
+	 * controllers.
+	 */
+	{ 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_80G_NHI, 0x0000, 0x0000,
+		  quirk_block_rpm_in_redrive },
+	{ 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_40G_NHI, 0x0000, 0x0000,
+		  quirk_block_rpm_in_redrive },
 	/*
 	 * CLx is not supported on AMD USB4 Yellow Carp and Pink Sardine platforms.
 	 */
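For context, tb_quirks[] entries are matched against each router during enumeration and zero-valued fields act as wildcards, so the two new entries fire for any Barlow Ridge host NHI regardless of the (unset) router vendor/device IDs. A simplified sketch of the matching logic, paraphrased rather than copied from the driver:

	static void check_quirks_sketch(struct tb_switch *sw)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(tb_quirks); i++) {
			const struct tb_quirk *q = &tb_quirks[i];

			/* 0x0000 in a field means "match anything" */
			if (q->hw_vendor_id && q->hw_vendor_id != sw->config.vendor_id)
				continue;
			if (q->hw_device_id && q->hw_device_id != sw->config.device_id)
				continue;

			q->hook(sw);	/* e.g. quirk_block_rpm_in_redrive() */
		}
	}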
drivers/thunderbolt/retimer.c
@@ -356,7 +356,7 @@ static void tb_retimer_release(struct device *dev)
 	kfree(rt);
 }
 
-struct device_type tb_retimer_type = {
+const struct device_type tb_retimer_type = {
 	.name = "thunderbolt_retimer",
 	.groups = retimer_groups,
 	.release = tb_retimer_release,
drivers/thunderbolt/switch.c
@@ -676,6 +676,13 @@ int tb_port_disable(struct tb_port *port)
 	return __tb_port_enable(port, false);
 }
 
+static int tb_port_reset(struct tb_port *port)
+{
+	if (tb_switch_is_usb4(port->sw))
+		return port->cap_usb4 ? usb4_port_reset(port) : 0;
+	return tb_lc_reset_port(port);
+}
+
 /*
  * tb_init_port() - initialize a port
  *
@@ -771,7 +778,7 @@ static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
 	if (max_hopid < 0 || max_hopid > port_max_hopid)
 		max_hopid = port_max_hopid;
 
-	return ida_simple_get(ida, min_hopid, max_hopid + 1, GFP_KERNEL);
+	return ida_alloc_range(ida, min_hopid, max_hopid, GFP_KERNEL);
 }
 
 /**
@@ -809,7 +816,7 @@ int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid)
  */
 void tb_port_release_in_hopid(struct tb_port *port, int hopid)
 {
-	ida_simple_remove(&port->in_hopids, hopid);
+	ida_free(&port->in_hopids, hopid);
 }
 
 /**
@@ -819,7 +826,7 @@ void tb_port_release_in_hopid(struct tb_port *port, int hopid)
  */
 void tb_port_release_out_hopid(struct tb_port *port, int hopid)
 {
-	ida_simple_remove(&port->out_hopids, hopid);
+	ida_free(&port->out_hopids, hopid);
 }
 
 static inline bool tb_switch_is_reachable(const struct tb_switch *parent,
@@ -1120,7 +1127,7 @@ int tb_port_lane_bonding_enable(struct tb_port *port)
 		ret = tb_port_set_link_width(port->dual_link_port,
 					     TB_LINK_WIDTH_DUAL);
 		if (ret)
-			goto err_lane0;
+			goto err_lane1;
 	}
 
 	/*
@@ -1531,29 +1538,124 @@ static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
 	       regs->__unknown1, regs->__unknown4);
 }
 
+static int tb_switch_reset_host(struct tb_switch *sw)
+{
+	if (sw->generation > 1) {
+		struct tb_port *port;
+
+		tb_switch_for_each_port(sw, port) {
+			int i, ret;
+
+			/*
+			 * For lane adapters we issue downstream port
+			 * reset and clear up path config spaces.
+			 *
+			 * For protocol adapters we disable the path and
+			 * clear path config space one by one (from 8 to
+			 * Max Input HopID of the adapter).
+			 */
+			if (tb_port_is_null(port) && !tb_is_upstream_port(port)) {
+				ret = tb_port_reset(port);
+				if (ret)
+					return ret;
+			} else if (tb_port_is_usb3_down(port) ||
+				   tb_port_is_usb3_up(port)) {
+				tb_usb3_port_enable(port, false);
+			} else if (tb_port_is_dpin(port) ||
+				   tb_port_is_dpout(port)) {
+				tb_dp_port_enable(port, false);
+			} else if (tb_port_is_pcie_down(port) ||
+				   tb_port_is_pcie_up(port)) {
+				tb_pci_port_enable(port, false);
+			} else {
+				continue;
+			}
+
+			/* Cleanup path config space of protocol adapter */
+			for (i = TB_PATH_MIN_HOPID;
+			     i <= port->config.max_in_hop_id; i++) {
+				ret = tb_path_deactivate_hop(port, i);
+				if (ret)
+					return ret;
+			}
+		}
+	} else {
+		struct tb_cfg_result res;
+
+		/* Thunderbolt 1 uses the "reset" config space packet */
+		res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2,
+				      TB_CFG_SWITCH, 2, 2);
+		if (res.err)
+			return res.err;
+		res = tb_cfg_reset(sw->tb->ctl, tb_route(sw));
+		if (res.err > 0)
+			return -EIO;
+		else if (res.err < 0)
+			return res.err;
+	}
+
+	return 0;
+}
+
+static int tb_switch_reset_device(struct tb_switch *sw)
+{
+	return tb_port_reset(tb_switch_downstream_port(sw));
+}
+
+static bool tb_switch_enumerated(struct tb_switch *sw)
+{
+	u32 val;
+	int ret;
+
+	/*
+	 * Read directly from the hardware because we use this also
+	 * during system sleep where sw->config.enabled is already set
+	 * by us.
+	 */
+	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_3, 1);
+	if (ret)
+		return false;
+
+	return !!(val & ROUTER_CS_3_V);
+}
+
 /**
- * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
- * @sw: Switch to reset
+ * tb_switch_reset() - Perform reset to the router
+ * @sw: Router to reset
  *
- * Return: Returns 0 on success or an error code on failure.
+ * Issues reset to the router @sw. Can be used for any router. For host
+ * routers, resets all the downstream ports and cleans up path config
+ * spaces accordingly. For device routers issues downstream port reset
+ * through the parent router, so as side effect there will be unplug
+ * soon after this is finished.
+ *
+ * If the router is not enumerated does nothing.
+ *
+ * Returns %0 on success or negative errno in case of failure.
  */
 int tb_switch_reset(struct tb_switch *sw)
 {
-	struct tb_cfg_result res;
+	int ret;
 
-	if (sw->generation > 1)
+	/*
+	 * We cannot access the port config spaces unless the router is
+	 * already enumerated. If the router is not enumerated it is
+	 * equal to being reset so we can skip that here.
+	 */
+	if (!tb_switch_enumerated(sw))
 		return 0;
 
-	tb_sw_dbg(sw, "resetting switch\n");
+	tb_sw_dbg(sw, "resetting\n");
 
-	res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2,
-			      TB_CFG_SWITCH, 2, 2);
-	if (res.err)
-		return res.err;
-	res = tb_cfg_reset(sw->tb->ctl, tb_route(sw));
-	if (res.err > 0)
-		return -EIO;
-	return res.err;
+	if (tb_route(sw))
+		ret = tb_switch_reset_device(sw);
+	else
+		ret = tb_switch_reset_host(sw);
+
+	if (ret)
+		tb_sw_warn(sw, "failed to reset\n");
+
+	return ret;
 }
 
 /**
@@ -2225,7 +2327,7 @@ static const struct dev_pm_ops tb_switch_pm_ops = {
 			   NULL)
 };
 
-struct device_type tb_switch_type = {
+const struct device_type tb_switch_type = {
 	.name = "thunderbolt_device",
 	.release = tb_switch_release,
 	.uevent = tb_switch_uevent,
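Taken together, tb_switch_reset() now works for any router: tb_switch_enumerated() first checks the valid bit (ROUTER_CS_3_V) so unenumerated routers are treated as already reset, then tb_route() selects between a downstream port reset through the parent (device routers) and the per-adapter walk in tb_switch_reset_host(), with the Thunderbolt 1 reset packet kept as the legacy path.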
drivers/thunderbolt/tb.c: file diff suppressed because it is too large.
drivers/thunderbolt/tb.h
@@ -23,6 +23,8 @@
 #define QUIRK_FORCE_POWER_LINK_CONTROLLER	BIT(0)
 /* Disable CLx if not supported */
 #define QUIRK_NO_CLX				BIT(1)
+/* Need to keep power on while USB4 port is in redrive mode */
+#define QUIRK_KEEP_POWER_IN_DP_REDRIVE		BIT(2)
 
 /**
  * struct tb_nvm - Structure holding NVM information
@@ -217,6 +219,11 @@ struct tb_switch {
  * @tb: Pointer to the domain the group belongs to
  * @index: Index of the group (aka Group_ID). Valid values %1-%7
  * @ports: DP IN adapters belonging to this group are linked here
+ * @reserved: Bandwidth released by one tunnel in the group, available
+ *	      to others. This is reported as part of estimated_bw for
+ *	      the group.
+ * @release_work: Worker to release the @reserved if it is not used by
+ *		  any of the tunnels.
  *
 * Any tunnel that requires isochronous bandwidth (that's DP for now) is
 * attached to a bandwidth group. All tunnels going through the same
@@ -227,6 +234,8 @@ struct tb_bandwidth_group {
 	struct tb *tb;
 	int index;
 	struct list_head ports;
+	int reserved;
+	struct delayed_work release_work;
 };
 
 /**
@@ -258,6 +267,7 @@ struct tb_bandwidth_group {
 * @group_list: The adapter is linked to the group's list of ports through this
 * @max_bw: Maximum possible bandwidth through this adapter if set to
 *	    non-zero.
+ * @redrive: For DP IN, if true the adapter is in redrive mode.
 *
 * In USB4 terminology this structure represents an adapter (protocol or
 * lane adapter).
@@ -286,6 +296,7 @@ struct tb_port {
 	struct tb_bandwidth_group *group;
 	struct list_head group_list;
 	unsigned int max_bw;
+	bool redrive;
 };
 
 /**
@@ -452,6 +463,8 @@ struct tb_path {
 *	      ICM to send driver ready message to the firmware.
 * @start: Starts the domain
 * @stop: Stops the domain
+ * @deinit: Perform any cleanup after the domain is stopped but before
+ *	    it is unregistered. Called without @tb->lock taken. Optional.
 * @suspend_noirq: Connection manager specific suspend_noirq
 * @resume_noirq: Connection manager specific resume_noirq
 * @suspend: Connection manager specific suspend
@@ -483,8 +496,9 @@ struct tb_path {
 */
 struct tb_cm_ops {
 	int (*driver_ready)(struct tb *tb);
-	int (*start)(struct tb *tb);
+	int (*start)(struct tb *tb, bool reset);
 	void (*stop)(struct tb *tb);
+	void (*deinit)(struct tb *tb);
 	int (*suspend_noirq)(struct tb *tb);
 	int (*resume_noirq)(struct tb *tb);
 	int (*suspend)(struct tb *tb);
@@ -735,10 +749,10 @@ static inline int tb_port_write(struct tb_port *port, const void *buffer,
 struct tb *icm_probe(struct tb_nhi *nhi);
 struct tb *tb_probe(struct tb_nhi *nhi);
 
-extern struct device_type tb_domain_type;
-extern struct device_type tb_retimer_type;
-extern struct device_type tb_switch_type;
-extern struct device_type usb4_port_device_type;
+extern const struct device_type tb_domain_type;
+extern const struct device_type tb_retimer_type;
+extern const struct device_type tb_switch_type;
+extern const struct device_type usb4_port_device_type;
 
 int tb_domain_init(void);
 void tb_domain_exit(void);
@@ -746,7 +760,7 @@ int tb_xdomain_init(void);
 void tb_xdomain_exit(void);
 
 struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize);
-int tb_domain_add(struct tb *tb);
+int tb_domain_add(struct tb *tb, bool reset);
 void tb_domain_remove(struct tb *tb);
 int tb_domain_suspend_noirq(struct tb *tb);
 int tb_domain_resume_noirq(struct tb *tb);
@@ -1150,6 +1164,7 @@ struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid,
 void tb_path_free(struct tb_path *path);
 int tb_path_activate(struct tb_path *path);
 void tb_path_deactivate(struct tb_path *path);
+int tb_path_deactivate_hop(struct tb_port *port, int hop_index);
 bool tb_path_is_invalid(struct tb_path *path);
 bool tb_path_port_on_path(const struct tb_path *path,
 			  const struct tb_port *port);
@@ -1169,6 +1184,7 @@ int tb_drom_read(struct tb_switch *sw);
 int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid);
 
 int tb_lc_read_uuid(struct tb_switch *sw, u32 *uuid);
+int tb_lc_reset_port(struct tb_port *port);
 int tb_lc_configure_port(struct tb_port *port);
 void tb_lc_unconfigure_port(struct tb_port *port);
 int tb_lc_configure_xdomain(struct tb_port *port);
@@ -1301,6 +1317,7 @@ void usb4_switch_remove_ports(struct tb_switch *sw);
 
 int usb4_port_unlock(struct tb_port *port);
 int usb4_port_hotplug_enable(struct tb_port *port);
+int usb4_port_reset(struct tb_port *port);
 int usb4_port_configure(struct tb_port *port);
 void usb4_port_unconfigure(struct tb_port *port);
 int usb4_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd);
drivers/thunderbolt/tb_regs.h
@@ -194,6 +194,8 @@ struct tb_regs_switch_header {
 #define USB4_VERSION_MAJOR_MASK			GENMASK(7, 5)
 
 #define ROUTER_CS_1				0x01
+#define ROUTER_CS_3				0x03
+#define ROUTER_CS_3_V				BIT(31)
 #define ROUTER_CS_4				0x04
 /* Used with the router cmuv field */
 #define ROUTER_CS_4_CMUV_V1			0x10
@@ -389,6 +391,7 @@ struct tb_regs_port_header {
 #define PORT_CS_18_CSA				BIT(22)
 #define PORT_CS_18_TIP				BIT(24)
 #define PORT_CS_19				0x13
+#define PORT_CS_19_DPR				BIT(0)
 #define PORT_CS_19_PC				BIT(3)
 #define PORT_CS_19_PID				BIT(4)
 #define PORT_CS_19_WOC				BIT(16)
@@ -584,6 +587,9 @@ struct tb_regs_hop {
 #define TB_LC_POWER				0x740
 
 /* Link controller registers */
+#define TB_LC_PORT_MODE				0x26
+#define TB_LC_PORT_MODE_DPR			BIT(0)
+
 #define TB_LC_CS_42				0x2a
 #define TB_LC_CS_42_USB_PLUGGED			BIT(31)
drivers/thunderbolt/trace.h (new file, 188 lines)
@@ -0,0 +1,188 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Thunderbolt tracing support
+ *
+ * Copyright (C) 2024, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ *	   Gil Fine <gil.fine@intel.com>
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM thunderbolt
+
+#if !defined(TB_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define TB_TRACE_H_
+
+#include <linux/trace_seq.h>
+#include <linux/tracepoint.h>
+
+#include "tb_msgs.h"
+
+#define tb_cfg_type_name(type)	{ type, #type }
+#define show_type_name(val)					\
+	__print_symbolic(val,					\
+		tb_cfg_type_name(TB_CFG_PKG_READ),		\
+		tb_cfg_type_name(TB_CFG_PKG_WRITE),		\
+		tb_cfg_type_name(TB_CFG_PKG_ERROR),		\
+		tb_cfg_type_name(TB_CFG_PKG_NOTIFY_ACK),	\
+		tb_cfg_type_name(TB_CFG_PKG_EVENT),		\
+		tb_cfg_type_name(TB_CFG_PKG_XDOMAIN_REQ),	\
+		tb_cfg_type_name(TB_CFG_PKG_XDOMAIN_RESP),	\
+		tb_cfg_type_name(TB_CFG_PKG_OVERRIDE),		\
+		tb_cfg_type_name(TB_CFG_PKG_RESET),		\
+		tb_cfg_type_name(TB_CFG_PKG_ICM_EVENT),		\
+		tb_cfg_type_name(TB_CFG_PKG_ICM_CMD),		\
+		tb_cfg_type_name(TB_CFG_PKG_ICM_RESP))
+
+#ifndef TB_TRACE_HELPERS
+#define TB_TRACE_HELPERS
+static inline const char *show_data_read_write(struct trace_seq *p,
+					       const u32 *data)
+{
+	const struct cfg_read_pkg *msg = (const struct cfg_read_pkg *)data;
+	const char *ret = trace_seq_buffer_ptr(p);
+
+	trace_seq_printf(p, "offset=%#x, len=%u, port=%d, config=%#x, seq=%d, ",
+			 msg->addr.offset, msg->addr.length, msg->addr.port,
+			 msg->addr.space, msg->addr.seq);
+
+	return ret;
+}
+
+static inline const char *show_data_error(struct trace_seq *p, const u32 *data)
+{
+	const struct cfg_error_pkg *msg = (const struct cfg_error_pkg *)data;
+	const char *ret = trace_seq_buffer_ptr(p);
+
+	trace_seq_printf(p, "error=%#x, port=%d, plug=%#x, ", msg->error,
+			 msg->port, msg->pg);
+
+	return ret;
+}
+
+static inline const char *show_data_event(struct trace_seq *p, const u32 *data)
+{
+	const struct cfg_event_pkg *msg = (const struct cfg_event_pkg *)data;
+	const char *ret = trace_seq_buffer_ptr(p);
+
+	trace_seq_printf(p, "port=%d, unplug=%#x, ", msg->port, msg->unplug);
+
+	return ret;
+}
+
+static inline const char *show_route(struct trace_seq *p, const u32 *data)
+{
+	const struct tb_cfg_header *header = (const struct tb_cfg_header *)data;
+	const char *ret = trace_seq_buffer_ptr(p);
+
+	trace_seq_printf(p, "route=%llx, ", tb_cfg_get_route(header));
+
+	return ret;
+}
+
+static inline const char *show_data(struct trace_seq *p, u8 type,
+				    const u32 *data, u32 length)
+{
+	const char *ret = trace_seq_buffer_ptr(p);
+	const char *prefix = "";
+	int i;
+
+	show_route(p, data);
+
+	switch (type) {
+	case TB_CFG_PKG_READ:
+	case TB_CFG_PKG_WRITE:
+		show_data_read_write(p, data);
+		break;
+
+	case TB_CFG_PKG_ERROR:
+		show_data_error(p, data);
+		break;
+
+	case TB_CFG_PKG_EVENT:
+		show_data_event(p, data);
+		break;
+
+	default:
+		break;
+	}
+
+	trace_seq_printf(p, "data=[");
+	for (i = 0; i < length; i++) {
+		trace_seq_printf(p, "%s0x%08x", prefix, data[i]);
+		prefix = ", ";
+	}
+	trace_seq_printf(p, "]");
+	trace_seq_putc(p, 0);
+
+	return ret;
+}
+#endif
+
+DECLARE_EVENT_CLASS(tb_raw,
+	TP_PROTO(int index, u8 type, const void *data, size_t size),
+	TP_ARGS(index, type, data, size),
+	TP_STRUCT__entry(
+		__field(int, index)
+		__field(u8, type)
+		__field(size_t, size)
+		__dynamic_array(u32, data, size / 4)
+	),
+	TP_fast_assign(
+		__entry->index = index;
+		__entry->type = type;
+		__entry->size = size / 4;
+		memcpy(__get_dynamic_array(data), data, size);
+	),
+	TP_printk("type=%s, size=%zd, domain=%d, %s",
+		  show_type_name(__entry->type), __entry->size, __entry->index,
+		  show_data(p, __entry->type, __get_dynamic_array(data),
+			    __entry->size)
+	)
+);
+
+DEFINE_EVENT(tb_raw, tb_tx,
+	TP_PROTO(int index, u8 type, const void *data, size_t size),
+	TP_ARGS(index, type, data, size)
+);
+
+DEFINE_EVENT(tb_raw, tb_event,
+	TP_PROTO(int index, u8 type, const void *data, size_t size),
+	TP_ARGS(index, type, data, size)
+);
+
+TRACE_EVENT(tb_rx,
+	TP_PROTO(int index, u8 type, const void *data, size_t size, bool dropped),
+	TP_ARGS(index, type, data, size, dropped),
+	TP_STRUCT__entry(
+		__field(int, index)
+		__field(u8, type)
+		__field(size_t, size)
+		__dynamic_array(u32, data, size / 4)
+		__field(bool, dropped)
+	),
+	TP_fast_assign(
+		__entry->index = index;
+		__entry->type = type;
+		__entry->size = size / 4;
+		memcpy(__get_dynamic_array(data), data, size);
+		__entry->dropped = dropped;
+	),
+	TP_printk("type=%s, dropped=%u, size=%zd, domain=%d, %s",
+		  show_type_name(__entry->type), __entry->dropped,
+		  __entry->size, __entry->index,
+		  show_data(p, __entry->type, __get_dynamic_array(data),
+			    __entry->size)
+	)
+);
+
+#endif /* TB_TRACE_H_ */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
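With TRACE_SYSTEM set to thunderbolt, the new events appear under the standard tracefs hierarchy once the driver is loaded; assuming tracefs is mounted at /sys/kernel/tracing, something like:

	echo 1 > /sys/kernel/tracing/events/thunderbolt/enable
	cat /sys/kernel/tracing/trace_pipe

enables tb_tx, tb_event and tb_rx and streams the decoded control packets, with the owning domain reported in the index field.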
drivers/thunderbolt/tunnel.c
@@ -706,7 +706,7 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
 	       "DP OUT maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
 	       out_rate, out_lanes, bw);
 
-	if (tb_port_path_direction_downstream(in, out))
+	if (tb_tunnel_direction_downstream(tunnel))
 		max_bw = tunnel->max_down;
 	else
 		max_bw = tunnel->max_up;
@@ -831,7 +831,7 @@ static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel)
 	 * max_up/down fields. For discovery we just read what the
 	 * estimation was set to.
	 */
-	if (tb_port_path_direction_downstream(in, out))
+	if (tb_tunnel_direction_downstream(tunnel))
 		estimated_bw = tunnel->max_down;
 	else
 		estimated_bw = tunnel->max_up;
@@ -926,12 +926,18 @@ static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
 	return 0;
 }
 
-/* max_bw is rounded up to next granularity */
+/**
+ * tb_dp_bandwidth_mode_maximum_bandwidth() - Maximum possible bandwidth
+ * @tunnel: DP tunnel to check
+ * @max_bw_rounded: Maximum bandwidth in Mb/s rounded up to the next granularity
+ *
+ * Returns maximum possible bandwidth for this tunnel in Mb/s.
+ */
 static int tb_dp_bandwidth_mode_maximum_bandwidth(struct tb_tunnel *tunnel,
-						  int *max_bw)
+						  int *max_bw_rounded)
 {
 	struct tb_port *in = tunnel->src_port;
-	int ret, rate, lanes, nrd_bw;
+	int ret, rate, lanes, max_bw;
 	u32 cap;
 
 	/*
@@ -947,41 +953,26 @@ static int tb_dp_bandwidth_mode_maximum_bandwidth(struct tb_tunnel *tunnel,
 		return ret;
 
 	rate = tb_dp_cap_get_rate_ext(cap);
-	if (tb_dp_is_uhbr_rate(rate)) {
-		/*
-		 * When UHBR is used there is no reduction in lanes so
-		 * we can use this directly.
-		 */
-		lanes = tb_dp_cap_get_lanes(cap);
-	} else {
-		/*
-		 * If there is no UHBR supported then check the
-		 * non-reduced rate and lanes.
-		 */
-		ret = usb4_dp_port_nrd(in, &rate, &lanes);
-		if (ret)
-			return ret;
-	}
+	lanes = tb_dp_cap_get_lanes(cap);
 
-	nrd_bw = tb_dp_bandwidth(rate, lanes);
+	max_bw = tb_dp_bandwidth(rate, lanes);
 
-	if (max_bw) {
+	if (max_bw_rounded) {
 		ret = usb4_dp_port_granularity(in);
 		if (ret < 0)
 			return ret;
-		*max_bw = roundup(nrd_bw, ret);
+		*max_bw_rounded = roundup(max_bw, ret);
 	}
 
-	return nrd_bw;
+	return max_bw;
 }
 
 static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel,
 						   int *consumed_up,
 						   int *consumed_down)
 {
-	struct tb_port *out = tunnel->dst_port;
 	struct tb_port *in = tunnel->src_port;
-	int ret, allocated_bw, max_bw;
+	int ret, allocated_bw, max_bw_rounded;
 
 	if (!usb4_dp_port_bandwidth_mode_enabled(in))
 		return -EOPNOTSUPP;
@@ -995,13 +986,13 @@ static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel,
 		return ret;
 	allocated_bw = ret;
 
-	ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw);
+	ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw_rounded);
 	if (ret < 0)
 		return ret;
-	if (allocated_bw == max_bw)
+	if (allocated_bw == max_bw_rounded)
 		allocated_bw = ret;
 
-	if (tb_port_path_direction_downstream(in, out)) {
+	if (tb_tunnel_direction_downstream(tunnel)) {
 		*consumed_up = 0;
 		*consumed_down = allocated_bw;
 	} else {
@@ -1015,7 +1006,6 @@ static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel,
 static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
 				     int *allocated_down)
 {
-	struct tb_port *out = tunnel->dst_port;
 	struct tb_port *in = tunnel->src_port;
 
 	/*
@@ -1023,20 +1013,21 @@ static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up
 	 * Otherwise we read it from the DPRX.
	 */
 	if (usb4_dp_port_bandwidth_mode_enabled(in) && tunnel->bw_mode) {
-		int ret, allocated_bw, max_bw;
+		int ret, allocated_bw, max_bw_rounded;
 
 		ret = usb4_dp_port_allocated_bandwidth(in);
 		if (ret < 0)
 			return ret;
 		allocated_bw = ret;
 
-		ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw);
+		ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel,
+							     &max_bw_rounded);
 		if (ret < 0)
 			return ret;
-		if (allocated_bw == max_bw)
+		if (allocated_bw == max_bw_rounded)
 			allocated_bw = ret;
 
-		if (tb_port_path_direction_downstream(in, out)) {
+		if (tb_tunnel_direction_downstream(tunnel)) {
 			*allocated_up = 0;
 			*allocated_down = allocated_bw;
 		} else {
@@ -1053,26 +1044,25 @@ static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up
 static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
 				 int *alloc_down)
 {
-	struct tb_port *out = tunnel->dst_port;
 	struct tb_port *in = tunnel->src_port;
-	int max_bw, ret, tmp;
+	int max_bw_rounded, ret, tmp;
 
 	if (!usb4_dp_port_bandwidth_mode_enabled(in))
 		return -EOPNOTSUPP;
 
-	ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw);
+	ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw_rounded);
 	if (ret < 0)
 		return ret;
 
-	if (tb_port_path_direction_downstream(in, out)) {
-		tmp = min(*alloc_down, max_bw);
+	if (tb_tunnel_direction_downstream(tunnel)) {
+		tmp = min(*alloc_down, max_bw_rounded);
 		ret = usb4_dp_port_allocate_bandwidth(in, tmp);
 		if (ret)
 			return ret;
 		*alloc_down = tmp;
 		*alloc_up = 0;
 	} else {
-		tmp = min(*alloc_up, max_bw);
+		tmp = min(*alloc_up, max_bw_rounded);
 		ret = usb4_dp_port_allocate_bandwidth(in, tmp);
 		if (ret)
 			return ret;
@@ -1150,17 +1140,16 @@ static int tb_dp_read_cap(struct tb_tunnel *tunnel, unsigned int cap, u32 *rate,
 static int tb_dp_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
 				   int *max_down)
 {
-	struct tb_port *in = tunnel->src_port;
 	int ret;
 
-	if (!usb4_dp_port_bandwidth_mode_enabled(in))
+	if (!usb4_dp_port_bandwidth_mode_enabled(tunnel->src_port))
 		return -EOPNOTSUPP;
 
 	ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, NULL);
 	if (ret < 0)
 		return ret;
 
-	if (tb_port_path_direction_downstream(in, tunnel->dst_port)) {
+	if (tb_tunnel_direction_downstream(tunnel)) {
 		*max_up = 0;
 		*max_down = ret;
 	} else {
@@ -1174,8 +1163,7 @@ static int tb_dp_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
 static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
 				    int *consumed_down)
 {
-	struct tb_port *in = tunnel->src_port;
-	const struct tb_switch *sw = in->sw;
+	const struct tb_switch *sw = tunnel->src_port->sw;
 	u32 rate = 0, lanes = 0;
 	int ret;
 
@@ -1196,17 +1184,13 @@ static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
 		/*
 		 * Then see if the DPRX negotiation is ready and if yes
 		 * return that bandwidth (it may be smaller than the
-		 * reduced one). Otherwise return the remote (possibly
-		 * reduced) caps.
+		 * reduced one). According to VESA spec, the DPRX
+		 * negotiation shall compete in 5 seconds after tunnel
+		 * established. We give it 100ms extra just in case.
		 */
-		ret = tb_dp_wait_dprx(tunnel, 150);
-		if (ret) {
-			if (ret == -ETIMEDOUT)
-				ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP,
-						     &rate, &lanes);
-			if (ret)
-				return ret;
-		}
+		ret = tb_dp_wait_dprx(tunnel, 5100);
+		if (ret)
+			return ret;
 		ret = tb_dp_read_cap(tunnel, DP_COMMON_CAP, &rate, &lanes);
 		if (ret)
 			return ret;
@@ -1221,7 +1205,7 @@ static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
 		return 0;
 	}
 
-	if (tb_port_path_direction_downstream(in, tunnel->dst_port)) {
+	if (tb_tunnel_direction_downstream(tunnel)) {
 		*consumed_up = 0;
 		*consumed_down = tb_dp_bandwidth(rate, lanes);
 	} else {
drivers/thunderbolt/tunnel.h
@@ -139,6 +139,12 @@ static inline bool tb_tunnel_is_usb3(const struct tb_tunnel *tunnel)
 	return tunnel->type == TB_TUNNEL_USB3;
 }
 
+static inline bool tb_tunnel_direction_downstream(const struct tb_tunnel *tunnel)
+{
+	return tb_port_path_direction_downstream(tunnel->src_port,
+						 tunnel->dst_port);
+}
+
 const char *tb_tunnel_type_name(const struct tb_tunnel *tunnel);
 
 #define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...)                   \
drivers/thunderbolt/usb4.c
@@ -1113,6 +1113,45 @@ int usb4_port_hotplug_enable(struct tb_port *port)
 	return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_5, 1);
 }
 
+/**
+ * usb4_port_reset() - Issue downstream port reset
+ * @port: USB4 port to reset
+ *
+ * Issues downstream port reset to @port.
+ */
+int usb4_port_reset(struct tb_port *port)
+{
+	int ret;
+	u32 val;
+
+	if (!port->cap_usb4)
+		return -EINVAL;
+
+	ret = tb_port_read(port, &val, TB_CFG_PORT,
+			   port->cap_usb4 + PORT_CS_19, 1);
+	if (ret)
+		return ret;
+
+	val |= PORT_CS_19_DPR;
+
+	ret = tb_port_write(port, &val, TB_CFG_PORT,
+			    port->cap_usb4 + PORT_CS_19, 1);
+	if (ret)
+		return ret;
+
+	fsleep(10000);
+
+	ret = tb_port_read(port, &val, TB_CFG_PORT,
+			   port->cap_usb4 + PORT_CS_19, 1);
+	if (ret)
+		return ret;
+
+	val &= ~PORT_CS_19_DPR;
+
+	return tb_port_write(port, &val, TB_CFG_PORT,
+			     port->cap_usb4 + PORT_CS_19, 1);
+}
+
 static int usb4_port_set_configured(struct tb_port *port, bool configured)
 {
 	int ret;
@@ -2819,8 +2858,10 @@ static int usb4_dp_port_wait_and_clear_cm_ack(struct tb_port *port,
 		usleep_range(50, 100);
 	} while (ktime_before(ktime_get(), end));
 
-	if (val & ADP_DP_CS_8_DR)
+	if (val & ADP_DP_CS_8_DR) {
+		tb_port_warn(port, "timeout waiting for DPTX request to clear\n");
 		return -ETIMEDOUT;
+	}
 
 	ret = tb_port_read(port, &val, TB_CFG_PORT,
 			   port->cap_adap + ADP_DP_CS_2, 1);
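usb4_port_reset() is the USB4 counterpart of tb_lc_reset_port() earlier in the series: both pulse a downstream port reset bit (PORT_CS_19_DPR here, TB_LC_PORT_MODE_DPR in the link controller) with the same 10 ms hold, and tb_port_reset() in switch.c dispatches to whichever variant the router supports.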
drivers/thunderbolt/usb4_port.c
@@ -243,7 +243,7 @@ static void usb4_port_device_release(struct device *dev)
 	kfree(usb4);
 }
 
-struct device_type usb4_port_device_type = {
+const struct device_type usb4_port_device_type = {
 	.name = "usb4_port",
 	.groups = usb4_port_device_groups,
 	.release = usb4_port_device_release,
drivers/thunderbolt/xdomain.c
@@ -997,12 +997,12 @@ static void tb_service_release(struct device *dev)
 	struct tb_xdomain *xd = tb_service_parent(svc);
 
 	tb_service_debugfs_remove(svc);
-	ida_simple_remove(&xd->service_ids, svc->id);
+	ida_free(&xd->service_ids, svc->id);
 	kfree(svc->key);
 	kfree(svc);
 }
 
-struct device_type tb_service_type = {
+const struct device_type tb_service_type = {
 	.name = "thunderbolt_service",
 	.groups = tb_service_attr_groups,
 	.uevent = tb_service_uevent,
@@ -1099,7 +1099,7 @@ static void enumerate_services(struct tb_xdomain *xd)
 			break;
 	}
 
-	id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL);
+	id = ida_alloc(&xd->service_ids, GFP_KERNEL);
 	if (id < 0) {
 		kfree(svc->key);
 		kfree(svc);
@@ -1791,13 +1791,13 @@ static ssize_t rx_lanes_show(struct device *dev, struct device_attribute *attr,
 
 	switch (xd->link_width) {
 	case TB_LINK_WIDTH_SINGLE:
-	case TB_LINK_WIDTH_ASYM_RX:
+	case TB_LINK_WIDTH_ASYM_TX:
 		width = 1;
 		break;
 	case TB_LINK_WIDTH_DUAL:
 		width = 2;
 		break;
-	case TB_LINK_WIDTH_ASYM_TX:
+	case TB_LINK_WIDTH_ASYM_RX:
 		width = 3;
 		break;
 	default:
@@ -1817,13 +1817,13 @@ static ssize_t tx_lanes_show(struct device *dev, struct device_attribute *attr,
 
 	switch (xd->link_width) {
 	case TB_LINK_WIDTH_SINGLE:
-	case TB_LINK_WIDTH_ASYM_TX:
+	case TB_LINK_WIDTH_ASYM_RX:
 		width = 1;
 		break;
 	case TB_LINK_WIDTH_DUAL:
 		width = 2;
 		break;
-	case TB_LINK_WIDTH_ASYM_RX:
+	case TB_LINK_WIDTH_ASYM_TX:
 		width = 3;
 		break;
 	default:
@@ -1893,7 +1893,7 @@ static const struct dev_pm_ops tb_xdomain_pm_ops = {
 	SET_SYSTEM_SLEEP_PM_OPS(tb_xdomain_suspend, tb_xdomain_resume)
 };
 
-struct device_type tb_xdomain_type = {
+const struct device_type tb_xdomain_type = {
 	.name = "thunderbolt_xdomain",
 	.release = tb_xdomain_release,
 	.pm = &tb_xdomain_pm_ops,
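The rx_lanes_show()/tx_lanes_show() fix is easiest to verify with a concrete case: on a TB_LINK_WIDTH_ASYM_TX link (three transmit lanes, one receive lane) rx_lanes must report 1 and tx_lanes 3, while TB_LINK_WIDTH_ASYM_RX is the mirror image; the old code had the asymmetric cases swapped in both attributes.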
include/linux/thunderbolt.h
@@ -87,8 +87,8 @@ struct tb {
 };
 
 extern const struct bus_type tb_bus_type;
-extern struct device_type tb_service_type;
-extern struct device_type tb_xdomain_type;
+extern const struct device_type tb_service_type;
+extern const struct device_type tb_xdomain_type;
 
 #define TB_LINKS_PER_PHY_PORT	2