mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2025-01-07 14:32:23 +00:00
thunderbolt: Changes for v5.19 merge window
This includes following Thunderbolt/USB4 changes for the v5.19 merge window:

  * Improvements for Thunderbolt 1 DisplayPort tunneling
  * Link USB4 ports to their USB Type-C connectors
  * Lane bonding support for host-to-host (XDomain) connections
  * Buffer allocation improvement for devices with no DisplayPort adapters
  * Few cleanups and minor fixes.

All these have been in linux-next with no reported issues except that
there is a minor merge conflict with the kunit-next tree because one of
the commits touches the driver KUnit tests.

Merge tag 'thunderbolt-for-v5.19-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt into usb-next

* tag 'thunderbolt-for-v5.19-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt:
  thunderbolt: Add KUnit test for devices with no DisplayPort adapters
  thunderbolt: Fix buffer allocation of devices with no DisplayPort adapters
  thunderbolt: Add support for XDomain lane bonding
  thunderbolt: Ignore port locked error in tb_port_wait_for_link_width()
  thunderbolt: Split setting link width and lane bonding into own functions
  thunderbolt: Move tb_port_state() prototype to correct place
  thunderbolt: Add debug logging when lane is enabled/disabled
  thunderbolt: Link USB4 ports to their USB Type-C connectors
  misc/mei: Add NULL check to component match callback functions
  thunderbolt: Use different lane for second DisplayPort tunnel
  thunderbolt: Dump path config space entries during discovery
  thunderbolt: Use decimal number with port numbers
  thunderbolt: Fix typo in comment
  thunderbolt: Replace usage of found with dedicated list iterator variable
This commit is contained in commit 376d6b02cb
@@ -293,6 +293,16 @@ Contact: thunderbolt-software@lists.01.org
 Description:	This contains XDomain service specific settings as
 		bitmask. Format: %x
 
+What:		/sys/bus/thunderbolt/devices/usb4_portX/connector
+Date:		April 2022
+Contact:	Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Description:
+		Symlink to the USB Type-C connector. This link is only
+		created when USB Type-C Connector Class is enabled,
+		and only if the system firmware is capable of
+		describing the connection between a port and its
+		connector.
+
 What:		/sys/bus/thunderbolt/devices/usb4_portX/link
 Date:		Sep 2021
 KernelVersion:	v5.14
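
The new attribute above is a plain symlink, so userspace can discover the Type-C connector behind a USB4 port simply by resolving it. A minimal sketch (not part of the patch; the port name "usb4_port1" is only an example, real names depend on the topology):

#include <stdio.h>
#include <limits.h>
#include <unistd.h>

int main(void)
{
	const char *link = "/sys/bus/thunderbolt/devices/usb4_port1/connector";
	char target[PATH_MAX];
	ssize_t len;

	/* The link is absent when the Type-C class or firmware support is missing. */
	len = readlink(link, target, sizeof(target) - 1);
	if (len < 0) {
		perror("readlink");
		return 1;
	}
	target[len] = '\0';
	printf("%s -> %s\n", link, target);
	return 0;
}
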
@@ -784,7 +784,7 @@ static int mei_hdcp_component_match(struct device *dev, int subcomponent,
 {
 	struct device *base = data;
 
-	if (strcmp(dev->driver->name, "i915") ||
+	if (!dev->driver || strcmp(dev->driver->name, "i915") ||
 	    subcomponent != I915_COMPONENT_HDCP)
 		return 0;
 
@@ -131,7 +131,7 @@ static int mei_pxp_component_match(struct device *dev, int subcomponent,
 {
 	struct device *base = data;
 
-	if (strcmp(dev->driver->name, "i915") ||
+	if (!dev->driver || strcmp(dev->driver->name, "i915") ||
 	    subcomponent != I915_COMPONENT_PXP)
 		return 0;
 
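
Both mei hunks above add the same guard: the match callback can be called for a device that has not been bound to a driver yet, in which case dev->driver is NULL and dereferencing dev->driver->name would crash. A standalone sketch with toy types (toy_device/toy_driver are hypothetical stand-ins, not the kernel structs) showing the shape of the check:

#include <stdio.h>
#include <string.h>

struct toy_driver { const char *name; };
struct toy_device { struct toy_driver *driver; };

/* Guard the driver pointer before looking at its name. */
static int component_match(const struct toy_device *dev, int subcomponent,
			   int wanted_subcomponent)
{
	if (!dev->driver || strcmp(dev->driver->name, "i915") ||
	    subcomponent != wanted_subcomponent)
		return 0;
	return 1;
}

int main(void)
{
	struct toy_driver i915 = { .name = "i915" };
	struct toy_device bound = { .driver = &i915 };
	struct toy_device unbound = { .driver = NULL };

	printf("bound matches: %d\n", component_match(&bound, 1, 1));
	printf("unbound matches: %d\n", component_match(&unbound, 1, 1));
	return 0;
}
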
@@ -158,21 +158,20 @@ static bool tb_cfg_request_is_active(struct tb_cfg_request *req)
 static struct tb_cfg_request *
 tb_cfg_request_find(struct tb_ctl *ctl, struct ctl_pkg *pkg)
 {
-	struct tb_cfg_request *req;
-	bool found = false;
+	struct tb_cfg_request *req = NULL, *iter;
 
 	mutex_lock(&pkg->ctl->request_queue_lock);
-	list_for_each_entry(req, &pkg->ctl->request_queue, list) {
-		tb_cfg_request_get(req);
-		if (req->match(req, pkg)) {
-			found = true;
+	list_for_each_entry(iter, &pkg->ctl->request_queue, list) {
+		tb_cfg_request_get(iter);
+		if (iter->match(iter, pkg)) {
+			req = iter;
 			break;
 		}
-		tb_cfg_request_put(req);
+		tb_cfg_request_put(iter);
 	}
 	mutex_unlock(&pkg->ctl->request_queue_lock);
 
-	return found ? req : NULL;
+	return req;
 }
 
 /* utility functions */
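
The hunk above replaces the found flag with a dedicated list iterator: the result pointer is assigned only when a real match is seen, so the loop variable is never reused after the loop. A standalone analogue with a plain singly linked list (request_find and struct request here are hypothetical illustration names) shows the same pattern:

#include <stdio.h>
#include <stddef.h>

struct request {
	int id;
	struct request *next;
};

static struct request *request_find(struct request *head, int id)
{
	struct request *req = NULL, *iter;

	for (iter = head; iter; iter = iter->next) {
		if (iter->id == id) {
			req = iter;	/* publish only on a real match */
			break;
		}
	}
	return req;			/* NULL when nothing matched */
}

int main(void)
{
	struct request c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };

	printf("id 2 found: %s\n", request_find(&a, 2) ? "yes" : "no");
	printf("id 9 found: %s\n", request_find(&a, 9) ? "yes" : "no");
	return 0;
}
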
@@ -1207,7 +1207,7 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	nhi->pdev = pdev;
 	nhi->ops = (const struct tb_nhi_ops *)id->driver_data;
-	/* cannot fail - table is allocated bin pcim_iomap_regions */
+	/* cannot fail - table is allocated in pcim_iomap_regions */
 	nhi->iobase = pcim_iomap_table(pdev)[0];
 	nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff;
 	dev_dbg(&pdev->dev, "total paths: %d\n", nhi->hop_count);
@@ -166,6 +166,9 @@ struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid,
 		return NULL;
 	}
 
+	tb_dbg(path->tb, "discovering %s path starting from %llx:%u\n",
+	       path->name, tb_route(src->sw), src->port);
+
 	p = src;
 	h = src_hopid;
 
@@ -198,10 +201,13 @@ struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid,
 		path->hops[i].out_port = out_port;
 		path->hops[i].next_hop_index = next_hop;
 
+		tb_dump_hop(&path->hops[i], &hop);
+
 		h = next_hop;
 		p = out_port->remote;
 	}
 
+	tb_dbg(path->tb, "path discovery complete\n");
 	return path;
 
 err:
@@ -693,8 +693,14 @@ static int __tb_port_enable(struct tb_port *port, bool enable)
 	else
 		phy |= LANE_ADP_CS_1_LD;
 
-	return tb_port_write(port, &phy, TB_CFG_PORT,
-			     port->cap_phy + LANE_ADP_CS_1, 1);
+	ret = tb_port_write(port, &phy, TB_CFG_PORT,
+			     port->cap_phy + LANE_ADP_CS_1, 1);
+	if (ret)
+		return ret;
+
+	tb_port_dbg(port, "lane %sabled\n", enable ? "en" : "dis");
+	return 0;
 }
 
 /**
@ -993,7 +999,17 @@ static bool tb_port_is_width_supported(struct tb_port *port, int width)
|
||||
return !!(widths & width);
|
||||
}
|
||||
|
||||
static int tb_port_set_link_width(struct tb_port *port, unsigned int width)
|
||||
/**
|
||||
* tb_port_set_link_width() - Set target link width of the lane adapter
|
||||
* @port: Lane adapter
|
||||
* @width: Target link width (%1 or %2)
|
||||
*
|
||||
* Sets the target link width of the lane adapter to @width. Does not
|
||||
* enable/disable lane bonding. For that call tb_port_set_lane_bonding().
|
||||
*
|
||||
* Return: %0 in case of success and negative errno in case of error
|
||||
*/
|
||||
int tb_port_set_link_width(struct tb_port *port, unsigned int width)
|
||||
{
|
||||
u32 val;
|
||||
int ret;
|
||||
@ -1020,12 +1036,58 @@ static int tb_port_set_link_width(struct tb_port *port, unsigned int width)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
val |= LANE_ADP_CS_1_LB;
|
||||
|
||||
return tb_port_write(port, &val, TB_CFG_PORT,
|
||||
port->cap_phy + LANE_ADP_CS_1, 1);
|
||||
}
|
||||
|
||||
/**
|
||||
* tb_port_set_lane_bonding() - Enable/disable lane bonding
|
||||
* @port: Lane adapter
|
||||
* @bonding: enable/disable bonding
|
||||
*
|
||||
* Enables or disables lane bonding. This should be called after target
|
||||
* link width has been set (tb_port_set_link_width()). Note in most
|
||||
* cases one should use tb_port_lane_bonding_enable() instead to enable
|
||||
* lane bonding.
|
||||
*
|
||||
* As a side effect sets @port->bonding accordingly (and does the same
|
||||
* for lane 1 too).
|
||||
*
|
||||
* Return: %0 in case of success and negative errno in case of error
|
||||
*/
|
||||
int tb_port_set_lane_bonding(struct tb_port *port, bool bonding)
|
||||
{
|
||||
u32 val;
|
||||
int ret;
|
||||
|
||||
if (!port->cap_phy)
|
||||
return -EINVAL;
|
||||
|
||||
ret = tb_port_read(port, &val, TB_CFG_PORT,
|
||||
port->cap_phy + LANE_ADP_CS_1, 1);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (bonding)
|
||||
val |= LANE_ADP_CS_1_LB;
|
||||
else
|
||||
val &= ~LANE_ADP_CS_1_LB;
|
||||
|
||||
ret = tb_port_write(port, &val, TB_CFG_PORT,
|
||||
port->cap_phy + LANE_ADP_CS_1, 1);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/*
|
||||
* When lane 0 bonding is set it will affect lane 1 too so
|
||||
* update both.
|
||||
*/
|
||||
port->bonded = bonding;
|
||||
port->dual_link_port->bonded = bonding;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* tb_port_lane_bonding_enable() - Enable bonding on port
|
||||
* @port: port to enable
|
||||
@ -1050,22 +1112,27 @@ int tb_port_lane_bonding_enable(struct tb_port *port)
|
||||
if (ret == 1) {
|
||||
ret = tb_port_set_link_width(port, 2);
|
||||
if (ret)
|
||||
return ret;
|
||||
goto err_lane0;
|
||||
}
|
||||
|
||||
ret = tb_port_get_link_width(port->dual_link_port);
|
||||
if (ret == 1) {
|
||||
ret = tb_port_set_link_width(port->dual_link_port, 2);
|
||||
if (ret) {
|
||||
tb_port_set_link_width(port, 1);
|
||||
return ret;
|
||||
}
|
||||
if (ret)
|
||||
goto err_lane0;
|
||||
}
|
||||
|
||||
port->bonded = true;
|
||||
port->dual_link_port->bonded = true;
|
||||
ret = tb_port_set_lane_bonding(port, true);
|
||||
if (ret)
|
||||
goto err_lane1;
|
||||
|
||||
return 0;
|
||||
|
||||
err_lane1:
|
||||
tb_port_set_link_width(port->dual_link_port, 1);
|
||||
err_lane0:
|
||||
tb_port_set_link_width(port, 1);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -1074,13 +1141,10 @@ int tb_port_lane_bonding_enable(struct tb_port *port)
|
||||
*
|
||||
* Disable bonding by setting the link width of the port and the
|
||||
* other port in case of dual link port.
|
||||
*
|
||||
*/
|
||||
void tb_port_lane_bonding_disable(struct tb_port *port)
|
||||
{
|
||||
port->dual_link_port->bonded = false;
|
||||
port->bonded = false;
|
||||
|
||||
tb_port_set_lane_bonding(port, false);
|
||||
tb_port_set_link_width(port->dual_link_port, 1);
|
||||
tb_port_set_link_width(port, 1);
|
||||
}
|
||||
@@ -1104,10 +1168,17 @@ int tb_port_wait_for_link_width(struct tb_port *port, int width,
 
 	do {
 		ret = tb_port_get_link_width(port);
-		if (ret < 0)
-			return ret;
-		else if (ret == width)
+		if (ret < 0) {
+			/*
+			 * Sometimes we get port locked error when
+			 * polling the lanes so we can ignore it and
+			 * retry.
+			 */
+			if (ret != -EACCES)
+				return ret;
+		} else if (ret == width) {
 			return 0;
+		}
 
 		usleep_range(1000, 2000);
 	} while (ktime_before(ktime_get(), timeout));
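
The hunk above keeps polling when the read reports the transient port locked error (-EACCES) and still gives up on any other error or when the deadline passes. A standalone sketch of the same bounded poll-and-retry shape, with a userspace deadline instead of ktime (read_width() and wait_for_width() are hypothetical stand-ins, the "hardware" read is randomly simulated):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <unistd.h>

static int read_width(void)
{
	int r = rand() % 4;

	if (r == 0)
		return -EACCES;	/* transient "locked" style error */
	return r;		/* 1..3 as a pretend link width */
}

static int wait_for_width(int want, int timeout_msec)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	do {
		int ret = read_width();

		if (ret < 0) {
			if (ret != -EACCES)	/* only the transient error is retried */
				return ret;
		} else if (ret == want) {
			return 0;
		}
		usleep(1000);
		clock_gettime(CLOCK_MONOTONIC, &now);
	} while ((now.tv_sec - start.tv_sec) * 1000 +
		 (now.tv_nsec - start.tv_nsec) / 1000000 < timeout_msec);

	return -ETIMEDOUT;
}

int main(void)
{
	printf("wait_for_width(2, 100) = %d\n", wait_for_width(2, 100));
	return 0;
}
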
@ -169,12 +169,6 @@ static void tb_discover_tunnels(struct tb *tb)
|
||||
|
||||
static int tb_port_configure_xdomain(struct tb_port *port)
|
||||
{
|
||||
/*
|
||||
* XDomain paths currently only support single lane so we must
|
||||
* disable the other lane according to USB4 spec.
|
||||
*/
|
||||
tb_port_disable(port->dual_link_port);
|
||||
|
||||
if (tb_switch_is_usb4(port->sw))
|
||||
return usb4_port_configure_xdomain(port);
|
||||
return tb_lc_configure_xdomain(port);
|
||||
@ -867,7 +861,7 @@ static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
|
||||
|
||||
static void tb_tunnel_dp(struct tb *tb)
|
||||
{
|
||||
int available_up, available_down, ret;
|
||||
int available_up, available_down, ret, link_nr;
|
||||
struct tb_cm *tcm = tb_priv(tb);
|
||||
struct tb_port *port, *in, *out;
|
||||
struct tb_tunnel *tunnel;
|
||||
@ -912,6 +906,20 @@ static void tb_tunnel_dp(struct tb *tb)
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* This is only applicable to links that are not bonded (so
|
||||
* when Thunderbolt 1 hardware is involved somewhere in the
|
||||
* topology). For these try to share the DP bandwidth between
|
||||
* the two lanes.
|
||||
*/
|
||||
link_nr = 1;
|
||||
list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
|
||||
if (tb_tunnel_is_dp(tunnel)) {
|
||||
link_nr = 0;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* DP stream needs the domain to be active so runtime resume
|
||||
* both ends of the tunnel.
|
||||
@ -943,7 +951,8 @@ static void tb_tunnel_dp(struct tb *tb)
|
||||
tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
|
||||
available_up, available_down);
|
||||
|
||||
tunnel = tb_tunnel_alloc_dp(tb, in, out, available_up, available_down);
|
||||
tunnel = tb_tunnel_alloc_dp(tb, in, out, link_nr, available_up,
|
||||
available_down);
|
||||
if (!tunnel) {
|
||||
tb_port_dbg(out, "could not allocate DP tunnel\n");
|
||||
goto err_reclaim;
|
||||
|
@ -674,7 +674,7 @@ static inline int tb_port_write(struct tb_port *port, const void *buffer,
|
||||
#define __TB_PORT_PRINT(level, _port, fmt, arg...) \
|
||||
do { \
|
||||
const struct tb_port *__port = (_port); \
|
||||
level(__port->sw->tb, "%llx:%x: " fmt, \
|
||||
level(__port->sw->tb, "%llx:%u: " fmt, \
|
||||
tb_route(__port->sw), __port->port, ## arg); \
|
||||
} while (0)
|
||||
#define tb_port_WARN(port, fmt, arg...) \
|
||||
@ -991,6 +991,7 @@ int tb_switch_pcie_l1_enable(struct tb_switch *sw);
|
||||
int tb_switch_xhci_connect(struct tb_switch *sw);
|
||||
void tb_switch_xhci_disconnect(struct tb_switch *sw);
|
||||
|
||||
int tb_port_state(struct tb_port *port);
|
||||
int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged);
|
||||
int tb_port_add_nfc_credits(struct tb_port *port, int credits);
|
||||
int tb_port_clear_counter(struct tb_port *port, int counter);
|
||||
@ -1023,7 +1024,8 @@ static inline bool tb_port_use_credit_allocation(const struct tb_port *port)
|
||||
|
||||
int tb_port_get_link_speed(struct tb_port *port);
|
||||
int tb_port_get_link_width(struct tb_port *port);
|
||||
int tb_port_state(struct tb_port *port);
|
||||
int tb_port_set_link_width(struct tb_port *port, unsigned int width);
|
||||
int tb_port_set_lane_bonding(struct tb_port *port, bool bonding);
|
||||
int tb_port_lane_bonding_enable(struct tb_port *port);
|
||||
void tb_port_lane_bonding_disable(struct tb_port *port);
|
||||
int tb_port_wait_for_link_width(struct tb_port *port, int width,
|
||||
|
@ -527,6 +527,10 @@ enum tb_xdp_type {
|
||||
PROPERTIES_CHANGED_RESPONSE,
|
||||
ERROR_RESPONSE,
|
||||
UUID_REQUEST = 12,
|
||||
LINK_STATE_STATUS_REQUEST = 15,
|
||||
LINK_STATE_STATUS_RESPONSE,
|
||||
LINK_STATE_CHANGE_REQUEST,
|
||||
LINK_STATE_CHANGE_RESPONSE,
|
||||
};
|
||||
|
||||
struct tb_xdp_header {
|
||||
@ -540,6 +544,41 @@ struct tb_xdp_error_response {
|
||||
u32 error;
|
||||
};
|
||||
|
||||
struct tb_xdp_link_state_status {
|
||||
struct tb_xdp_header hdr;
|
||||
};
|
||||
|
||||
struct tb_xdp_link_state_status_response {
|
||||
union {
|
||||
struct tb_xdp_error_response err;
|
||||
struct {
|
||||
struct tb_xdp_header hdr;
|
||||
u32 status;
|
||||
u8 slw;
|
||||
u8 tlw;
|
||||
u8 sls;
|
||||
u8 tls;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
struct tb_xdp_link_state_change {
|
||||
struct tb_xdp_header hdr;
|
||||
u8 tlw;
|
||||
u8 tls;
|
||||
u16 reserved;
|
||||
};
|
||||
|
||||
struct tb_xdp_link_state_change_response {
|
||||
union {
|
||||
struct tb_xdp_error_response err;
|
||||
struct {
|
||||
struct tb_xdp_header hdr;
|
||||
u32 status;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
struct tb_xdp_uuid {
|
||||
struct tb_xdp_header hdr;
|
||||
};
|
||||
|
@ -311,11 +311,16 @@ struct tb_regs_port_header {
|
||||
|
||||
/* Lane adapter registers */
|
||||
#define LANE_ADP_CS_0 0x00
|
||||
#define LANE_ADP_CS_0_SUPPORTED_SPEED_MASK GENMASK(19, 16)
|
||||
#define LANE_ADP_CS_0_SUPPORTED_SPEED_SHIFT 16
|
||||
#define LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK GENMASK(25, 20)
|
||||
#define LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT 20
|
||||
#define LANE_ADP_CS_0_SUPPORTED_WIDTH_DUAL 0x2
|
||||
#define LANE_ADP_CS_0_CL0S_SUPPORT BIT(26)
|
||||
#define LANE_ADP_CS_0_CL1_SUPPORT BIT(27)
|
||||
#define LANE_ADP_CS_1 0x01
|
||||
#define LANE_ADP_CS_1_TARGET_SPEED_MASK GENMASK(3, 0)
|
||||
#define LANE_ADP_CS_1_TARGET_SPEED_GEN3 0xc
|
||||
#define LANE_ADP_CS_1_TARGET_WIDTH_MASK GENMASK(9, 4)
|
||||
#define LANE_ADP_CS_1_TARGET_WIDTH_SHIFT 4
|
||||
#define LANE_ADP_CS_1_TARGET_WIDTH_SINGLE 0x1
|
||||
|
@ -341,6 +341,47 @@ static struct tb_switch *alloc_dev_with_dpin(struct kunit *test,
|
||||
return sw;
|
||||
}
|
||||
|
||||
static struct tb_switch *alloc_dev_without_dp(struct kunit *test,
|
||||
struct tb_switch *parent,
|
||||
u64 route, bool bonded)
|
||||
{
|
||||
struct tb_switch *sw;
|
||||
int i;
|
||||
|
||||
sw = alloc_dev_default(test, parent, route, bonded);
|
||||
if (!sw)
|
||||
return NULL;
|
||||
/*
|
||||
* Device with:
|
||||
* 2x USB4 Adapters (adapters 1,2 and 3,4),
|
||||
* 1x PCIe Upstream (adapter 9),
|
||||
* 1x PCIe Downstream (adapter 10),
|
||||
* 1x USB3 Upstream (adapter 16),
|
||||
* 1x USB3 Downstream (adapter 17)
|
||||
*/
|
||||
for (i = 5; i <= 8; i++)
|
||||
sw->ports[i].disabled = true;
|
||||
|
||||
for (i = 11; i <= 14; i++)
|
||||
sw->ports[i].disabled = true;
|
||||
|
||||
sw->ports[13].cap_adap = 0;
|
||||
sw->ports[14].cap_adap = 0;
|
||||
|
||||
for (i = 18; i <= 19; i++)
|
||||
sw->ports[i].disabled = true;
|
||||
|
||||
sw->generation = 4;
|
||||
sw->credit_allocation = true;
|
||||
sw->max_usb3_credits = 109;
|
||||
sw->min_dp_aux_credits = 0;
|
||||
sw->min_dp_main_credits = 0;
|
||||
sw->max_pcie_credits = 30;
|
||||
sw->max_dma_credits = 1;
|
||||
|
||||
return sw;
|
||||
}
|
||||
|
||||
static struct tb_switch *alloc_dev_usb4(struct kunit *test,
|
||||
struct tb_switch *parent,
|
||||
u64 route, bool bonded)
|
||||
@ -1348,7 +1389,7 @@ static void tb_test_tunnel_dp(struct kunit *test)
|
||||
in = &host->ports[5];
|
||||
out = &dev->ports[13];
|
||||
|
||||
tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
|
||||
tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
|
||||
KUNIT_ASSERT_TRUE(test, tunnel != NULL);
|
||||
KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
|
||||
KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
|
||||
@ -1394,7 +1435,7 @@ static void tb_test_tunnel_dp_chain(struct kunit *test)
|
||||
in = &host->ports[5];
|
||||
out = &dev4->ports[14];
|
||||
|
||||
tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
|
||||
tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
|
||||
KUNIT_ASSERT_TRUE(test, tunnel != NULL);
|
||||
KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
|
||||
KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
|
||||
@ -1444,7 +1485,7 @@ static void tb_test_tunnel_dp_tree(struct kunit *test)
|
||||
in = &dev2->ports[13];
|
||||
out = &dev5->ports[13];
|
||||
|
||||
tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
|
||||
tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
|
||||
KUNIT_ASSERT_TRUE(test, tunnel != NULL);
|
||||
KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
|
||||
KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
|
||||
@ -1509,7 +1550,7 @@ static void tb_test_tunnel_dp_max_length(struct kunit *test)
|
||||
in = &dev6->ports[13];
|
||||
out = &dev12->ports[13];
|
||||
|
||||
tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
|
||||
tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
|
||||
KUNIT_ASSERT_TRUE(test, tunnel != NULL);
|
||||
KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
|
||||
KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
|
||||
@ -1627,7 +1668,7 @@ static void tb_test_tunnel_port_on_path(struct kunit *test)
|
||||
in = &dev2->ports[13];
|
||||
out = &dev5->ports[13];
|
||||
|
||||
dp_tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
|
||||
dp_tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
|
||||
KUNIT_ASSERT_TRUE(test, dp_tunnel != NULL);
|
||||
|
||||
KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, in));
|
||||
@ -1996,6 +2037,56 @@ static void tb_test_credit_alloc_pcie(struct kunit *test)
|
||||
tb_tunnel_free(tunnel);
|
||||
}
|
||||
|
||||
static void tb_test_credit_alloc_without_dp(struct kunit *test)
|
||||
{
|
||||
struct tb_switch *host, *dev;
|
||||
struct tb_port *up, *down;
|
||||
struct tb_tunnel *tunnel;
|
||||
struct tb_path *path;
|
||||
|
||||
host = alloc_host_usb4(test);
|
||||
dev = alloc_dev_without_dp(test, host, 0x1, true);
|
||||
|
||||
/*
|
||||
* The device has no DP therefore baMinDPmain = baMinDPaux = 0
|
||||
*
|
||||
* Create PCIe path with buffers less than baMaxPCIe.
|
||||
*
|
||||
* For a device with buffers configurations:
|
||||
* baMaxUSB3 = 109
|
||||
* baMinDPaux = 0
|
||||
* baMinDPmain = 0
|
||||
* baMaxPCIe = 30
|
||||
* baMaxHI = 1
|
||||
* Remaining Buffers = Total - (CP + DP) = 120 - (2 + 0) = 118
|
||||
* PCIe Credits = Max(6, Min(baMaxPCIe, Remaining Buffers - baMaxUSB3)
|
||||
* = Max(6, Min(30, 9) = 9
|
||||
*/
|
||||
down = &host->ports[8];
|
||||
up = &dev->ports[9];
|
||||
tunnel = tb_tunnel_alloc_pci(NULL, up, down);
|
||||
KUNIT_ASSERT_TRUE(test, tunnel != NULL);
|
||||
KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
|
||||
|
||||
/* PCIe downstream path */
|
||||
path = tunnel->paths[0];
|
||||
KUNIT_ASSERT_EQ(test, path->path_length, 2);
|
||||
KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
|
||||
KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
|
||||
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
|
||||
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 9U);
|
||||
|
||||
/* PCIe upstream path */
|
||||
path = tunnel->paths[1];
|
||||
KUNIT_ASSERT_EQ(test, path->path_length, 2);
|
||||
KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
|
||||
KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
|
||||
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
|
||||
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U);
|
||||
|
||||
tb_tunnel_free(tunnel);
|
||||
}
|
||||
|
||||
static void tb_test_credit_alloc_dp(struct kunit *test)
|
||||
{
|
||||
struct tb_switch *host, *dev;
|
||||
@ -2009,7 +2100,7 @@ static void tb_test_credit_alloc_dp(struct kunit *test)
|
||||
in = &host->ports[5];
|
||||
out = &dev->ports[14];
|
||||
|
||||
tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
|
||||
tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
|
||||
KUNIT_ASSERT_TRUE(test, tunnel != NULL);
|
||||
KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)3);
|
||||
|
||||
@ -2245,7 +2336,7 @@ static struct tb_tunnel *TB_TEST_DP_TUNNEL1(struct kunit *test,
|
||||
|
||||
in = &host->ports[5];
|
||||
out = &dev->ports[13];
|
||||
dp_tunnel1 = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
|
||||
dp_tunnel1 = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
|
||||
KUNIT_ASSERT_TRUE(test, dp_tunnel1 != NULL);
|
||||
KUNIT_ASSERT_EQ(test, dp_tunnel1->npaths, (size_t)3);
|
||||
|
||||
@ -2282,7 +2373,7 @@ static struct tb_tunnel *TB_TEST_DP_TUNNEL2(struct kunit *test,
|
||||
|
||||
in = &host->ports[6];
|
||||
out = &dev->ports[14];
|
||||
dp_tunnel2 = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
|
||||
dp_tunnel2 = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
|
||||
KUNIT_ASSERT_TRUE(test, dp_tunnel2 != NULL);
|
||||
KUNIT_ASSERT_EQ(test, dp_tunnel2->npaths, (size_t)3);
|
||||
|
||||
@ -2709,6 +2800,7 @@ static struct kunit_case tb_test_cases[] = {
|
||||
KUNIT_CASE(tb_test_credit_alloc_legacy_not_bonded),
|
||||
KUNIT_CASE(tb_test_credit_alloc_legacy_bonded),
|
||||
KUNIT_CASE(tb_test_credit_alloc_pcie),
|
||||
KUNIT_CASE(tb_test_credit_alloc_without_dp),
|
||||
KUNIT_CASE(tb_test_credit_alloc_dp),
|
||||
KUNIT_CASE(tb_test_credit_alloc_usb3),
|
||||
KUNIT_CASE(tb_test_credit_alloc_dma),
|
||||
|
@@ -102,8 +102,11 @@ static unsigned int tb_available_credits(const struct tb_port *port,
 		 * Maximum number of DP streams possible through the
 		 * lane adapter.
 		 */
-		ndp = (credits - (usb3 + pcie + spare)) /
-		      (sw->min_dp_aux_credits + sw->min_dp_main_credits);
+		if (sw->min_dp_aux_credits + sw->min_dp_main_credits)
+			ndp = (credits - (usb3 + pcie + spare)) /
+			      (sw->min_dp_aux_credits + sw->min_dp_main_credits);
+		else
+			ndp = 0;
 	} else {
 		ndp = 0;
 	}
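
The hunk above guards the division: a device with no DisplayPort adapters advertises zero minimum DP credits, so the old expression divided by zero. A standalone sketch of the guarded computation (dp_streams() and its totals are hypothetical illustration values; only the zero DP credits come from the new KUnit test below):

#include <stdio.h>

static unsigned int dp_streams(unsigned int credits, unsigned int usb3,
			       unsigned int pcie, unsigned int spare,
			       unsigned int dp_aux, unsigned int dp_main)
{
	unsigned int ndp;

	if (dp_aux + dp_main)
		ndp = (credits - (usb3 + pcie + spare)) / (dp_aux + dp_main);
	else
		ndp = 0;	/* no DP adapters: no DP streams, no division */
	return ndp;
}

int main(void)
{
	printf("ndp = %u\n", dp_streams(120, 109, 9, 1, 0, 0));
	return 0;
}
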
@ -858,6 +861,7 @@ struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
|
||||
* @tb: Pointer to the domain structure
|
||||
* @in: DP in adapter port
|
||||
* @out: DP out adapter port
|
||||
* @link_nr: Preferred lane adapter when the link is not bonded
|
||||
* @max_up: Maximum available upstream bandwidth for the DP tunnel (%0
|
||||
* if not limited)
|
||||
* @max_down: Maximum available downstream bandwidth for the DP tunnel
|
||||
@ -869,8 +873,8 @@ struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
|
||||
* Return: Returns a tb_tunnel on success or NULL on failure.
|
||||
*/
|
||||
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
|
||||
struct tb_port *out, int max_up,
|
||||
int max_down)
|
||||
struct tb_port *out, int link_nr,
|
||||
int max_up, int max_down)
|
||||
{
|
||||
struct tb_tunnel *tunnel;
|
||||
struct tb_path **paths;
|
||||
@ -894,21 +898,21 @@ struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
|
||||
paths = tunnel->paths;
|
||||
|
||||
path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID,
|
||||
1, "Video");
|
||||
link_nr, "Video");
|
||||
if (!path)
|
||||
goto err_free;
|
||||
tb_dp_init_video_path(path);
|
||||
paths[TB_DP_VIDEO_PATH_OUT] = path;
|
||||
|
||||
path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
|
||||
TB_DP_AUX_TX_HOPID, 1, "AUX TX");
|
||||
TB_DP_AUX_TX_HOPID, link_nr, "AUX TX");
|
||||
if (!path)
|
||||
goto err_free;
|
||||
tb_dp_init_aux_path(path);
|
||||
paths[TB_DP_AUX_PATH_OUT] = path;
|
||||
|
||||
path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in,
|
||||
TB_DP_AUX_RX_HOPID, 1, "AUX RX");
|
||||
TB_DP_AUX_RX_HOPID, link_nr, "AUX RX");
|
||||
if (!path)
|
||||
goto err_free;
|
||||
tb_dp_init_aux_path(path);
|
||||
|
@ -71,8 +71,8 @@ struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
|
||||
struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
|
||||
bool alloc_hopid);
|
||||
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
|
||||
struct tb_port *out, int max_up,
|
||||
int max_down);
|
||||
struct tb_port *out, int link_nr,
|
||||
int max_up, int max_down);
|
||||
struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
|
||||
struct tb_port *dst, int transmit_path,
|
||||
int transmit_ring, int receive_path,
|
||||
|
@ -7,9 +7,37 @@
|
||||
*/
|
||||
|
||||
#include <linux/pm_runtime.h>
|
||||
#include <linux/component.h>
|
||||
#include <linux/property.h>
|
||||
|
||||
#include "tb.h"
|
||||
|
||||
static int connector_bind(struct device *dev, struct device *connector, void *data)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = sysfs_create_link(&dev->kobj, &connector->kobj, "connector");
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = sysfs_create_link(&connector->kobj, &dev->kobj, dev_name(dev));
|
||||
if (ret)
|
||||
sysfs_remove_link(&dev->kobj, "connector");
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void connector_unbind(struct device *dev, struct device *connector, void *data)
|
||||
{
|
||||
sysfs_remove_link(&connector->kobj, dev_name(dev));
|
||||
sysfs_remove_link(&dev->kobj, "connector");
|
||||
}
|
||||
|
||||
static const struct component_ops connector_ops = {
|
||||
.bind = connector_bind,
|
||||
.unbind = connector_unbind,
|
||||
};
|
||||
|
||||
static ssize_t link_show(struct device *dev, struct device_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
@ -246,6 +274,14 @@ struct usb4_port *usb4_port_device_add(struct tb_port *port)
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
if (dev_fwnode(&usb4->dev)) {
|
||||
ret = component_add(&usb4->dev, &connector_ops);
|
||||
if (ret) {
|
||||
dev_err(&usb4->dev, "failed to add component\n");
|
||||
device_unregister(&usb4->dev);
|
||||
}
|
||||
}
|
||||
|
||||
pm_runtime_no_callbacks(&usb4->dev);
|
||||
pm_runtime_set_active(&usb4->dev);
|
||||
pm_runtime_enable(&usb4->dev);
|
||||
@ -265,6 +301,8 @@ struct usb4_port *usb4_port_device_add(struct tb_port *port)
|
||||
*/
|
||||
void usb4_port_device_remove(struct usb4_port *usb4)
|
||||
{
|
||||
if (dev_fwnode(&usb4->dev))
|
||||
component_del(&usb4->dev, &connector_ops);
|
||||
device_unregister(&usb4->dev);
|
||||
}
|
||||
|
||||
|
@ -19,13 +19,38 @@
|
||||
|
||||
#include "tb.h"
|
||||
|
||||
#define XDOMAIN_DEFAULT_TIMEOUT 1000 /* ms */
|
||||
#define XDOMAIN_UUID_RETRIES 10
|
||||
#define XDOMAIN_PROPERTIES_RETRIES 10
|
||||
#define XDOMAIN_PROPERTIES_CHANGED_RETRIES 10
|
||||
#define XDOMAIN_BONDING_WAIT 100 /* ms */
|
||||
#define XDOMAIN_SHORT_TIMEOUT 100 /* ms */
|
||||
#define XDOMAIN_DEFAULT_TIMEOUT 1000 /* ms */
|
||||
#define XDOMAIN_BONDING_TIMEOUT 10000 /* ms */
|
||||
#define XDOMAIN_RETRIES 10
|
||||
#define XDOMAIN_DEFAULT_MAX_HOPID 15
|
||||
|
||||
enum {
|
||||
XDOMAIN_STATE_INIT,
|
||||
XDOMAIN_STATE_UUID,
|
||||
XDOMAIN_STATE_LINK_STATUS,
|
||||
XDOMAIN_STATE_LINK_STATE_CHANGE,
|
||||
XDOMAIN_STATE_LINK_STATUS2,
|
||||
XDOMAIN_STATE_BONDING_UUID_LOW,
|
||||
XDOMAIN_STATE_BONDING_UUID_HIGH,
|
||||
XDOMAIN_STATE_PROPERTIES,
|
||||
XDOMAIN_STATE_ENUMERATED,
|
||||
XDOMAIN_STATE_ERROR,
|
||||
};
|
||||
|
||||
static const char * const state_names[] = {
|
||||
[XDOMAIN_STATE_INIT] = "INIT",
|
||||
[XDOMAIN_STATE_UUID] = "UUID",
|
||||
[XDOMAIN_STATE_LINK_STATUS] = "LINK_STATUS",
|
||||
[XDOMAIN_STATE_LINK_STATE_CHANGE] = "LINK_STATE_CHANGE",
|
||||
[XDOMAIN_STATE_LINK_STATUS2] = "LINK_STATUS2",
|
||||
[XDOMAIN_STATE_BONDING_UUID_LOW] = "BONDING_UUID_LOW",
|
||||
[XDOMAIN_STATE_BONDING_UUID_HIGH] = "BONDING_UUID_HIGH",
|
||||
[XDOMAIN_STATE_PROPERTIES] = "PROPERTIES",
|
||||
[XDOMAIN_STATE_ENUMERATED] = "ENUMERATED",
|
||||
[XDOMAIN_STATE_ERROR] = "ERROR",
|
||||
};
|
||||
|
||||
struct xdomain_request_work {
|
||||
struct work_struct work;
|
||||
struct tb_xdp_header *pkg;
|
||||
@ -235,7 +260,7 @@ static int tb_xdp_handle_error(const struct tb_xdp_error_response *res)
|
||||
}
|
||||
|
||||
static int tb_xdp_uuid_request(struct tb_ctl *ctl, u64 route, int retry,
|
||||
uuid_t *uuid)
|
||||
uuid_t *uuid, u64 *remote_route)
|
||||
{
|
||||
struct tb_xdp_uuid_response res;
|
||||
struct tb_xdp_uuid req;
|
||||
@ -258,6 +283,8 @@ static int tb_xdp_uuid_request(struct tb_ctl *ctl, u64 route, int retry,
|
||||
return ret;
|
||||
|
||||
uuid_copy(uuid, &res.src_uuid);
|
||||
*remote_route = (u64)res.src_route_hi << 32 | res.src_route_lo;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -473,6 +500,112 @@ tb_xdp_properties_changed_response(struct tb_ctl *ctl, u64 route, u8 sequence)
|
||||
TB_CFG_PKG_XDOMAIN_RESP);
|
||||
}
|
||||
|
||||
static int tb_xdp_link_state_status_request(struct tb_ctl *ctl, u64 route,
|
||||
u8 sequence, u8 *slw, u8 *tlw,
|
||||
u8 *sls, u8 *tls)
|
||||
{
|
||||
struct tb_xdp_link_state_status_response res;
|
||||
struct tb_xdp_link_state_status req;
|
||||
int ret;
|
||||
|
||||
memset(&req, 0, sizeof(req));
|
||||
tb_xdp_fill_header(&req.hdr, route, sequence, LINK_STATE_STATUS_REQUEST,
|
||||
sizeof(req));
|
||||
|
||||
memset(&res, 0, sizeof(res));
|
||||
ret = __tb_xdomain_request(ctl, &req, sizeof(req), TB_CFG_PKG_XDOMAIN_REQ,
|
||||
&res, sizeof(res), TB_CFG_PKG_XDOMAIN_RESP,
|
||||
XDOMAIN_DEFAULT_TIMEOUT);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = tb_xdp_handle_error(&res.err);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (res.status != 0)
|
||||
return -EREMOTEIO;
|
||||
|
||||
*slw = res.slw;
|
||||
*tlw = res.tlw;
|
||||
*sls = res.sls;
|
||||
*tls = res.tls;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int tb_xdp_link_state_status_response(struct tb *tb, struct tb_ctl *ctl,
|
||||
struct tb_xdomain *xd, u8 sequence)
|
||||
{
|
||||
struct tb_switch *sw = tb_to_switch(xd->dev.parent);
|
||||
struct tb_xdp_link_state_status_response res;
|
||||
struct tb_port *port = tb_port_at(xd->route, sw);
|
||||
u32 val[2];
|
||||
int ret;
|
||||
|
||||
memset(&res, 0, sizeof(res));
|
||||
tb_xdp_fill_header(&res.hdr, xd->route, sequence,
|
||||
LINK_STATE_STATUS_RESPONSE, sizeof(res));
|
||||
|
||||
ret = tb_port_read(port, val, TB_CFG_PORT,
|
||||
port->cap_phy + LANE_ADP_CS_0, ARRAY_SIZE(val));
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
res.slw = (val[0] & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >>
|
||||
LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT;
|
||||
res.sls = (val[0] & LANE_ADP_CS_0_SUPPORTED_SPEED_MASK) >>
|
||||
LANE_ADP_CS_0_SUPPORTED_SPEED_SHIFT;
|
||||
res.tls = val[1] & LANE_ADP_CS_1_TARGET_SPEED_MASK;
|
||||
res.tlw = (val[1] & LANE_ADP_CS_1_TARGET_WIDTH_MASK) >>
|
||||
LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
|
||||
|
||||
return __tb_xdomain_response(ctl, &res, sizeof(res),
|
||||
TB_CFG_PKG_XDOMAIN_RESP);
|
||||
}
|
||||
|
||||
static int tb_xdp_link_state_change_request(struct tb_ctl *ctl, u64 route,
|
||||
u8 sequence, u8 tlw, u8 tls)
|
||||
{
|
||||
struct tb_xdp_link_state_change_response res;
|
||||
struct tb_xdp_link_state_change req;
|
||||
int ret;
|
||||
|
||||
memset(&req, 0, sizeof(req));
|
||||
tb_xdp_fill_header(&req.hdr, route, sequence, LINK_STATE_CHANGE_REQUEST,
|
||||
sizeof(req));
|
||||
req.tlw = tlw;
|
||||
req.tls = tls;
|
||||
|
||||
memset(&res, 0, sizeof(res));
|
||||
ret = __tb_xdomain_request(ctl, &req, sizeof(req), TB_CFG_PKG_XDOMAIN_REQ,
|
||||
&res, sizeof(res), TB_CFG_PKG_XDOMAIN_RESP,
|
||||
XDOMAIN_DEFAULT_TIMEOUT);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = tb_xdp_handle_error(&res.err);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return res.status != 0 ? -EREMOTEIO : 0;
|
||||
}
|
||||
|
||||
static int tb_xdp_link_state_change_response(struct tb_ctl *ctl, u64 route,
|
||||
u8 sequence, u32 status)
|
||||
{
|
||||
struct tb_xdp_link_state_change_response res;
|
||||
|
||||
memset(&res, 0, sizeof(res));
|
||||
tb_xdp_fill_header(&res.hdr, route, sequence, LINK_STATE_CHANGE_RESPONSE,
|
||||
sizeof(res));
|
||||
|
||||
res.status = status;
|
||||
|
||||
return __tb_xdomain_response(ctl, &res, sizeof(res),
|
||||
TB_CFG_PKG_XDOMAIN_RESP);
|
||||
}
|
||||
|
||||
/**
|
||||
* tb_register_protocol_handler() - Register protocol handler
|
||||
* @handler: Handler to register
|
||||
@ -600,14 +733,13 @@ static void tb_xdp_handle_request(struct work_struct *work)
|
||||
goto out;
|
||||
}
|
||||
|
||||
tb_dbg(tb, "%llx: received XDomain request %#x\n", route, pkg->type);
|
||||
|
||||
xd = tb_xdomain_find_by_route_locked(tb, route);
|
||||
if (xd)
|
||||
update_property_block(xd);
|
||||
|
||||
switch (pkg->type) {
|
||||
case PROPERTIES_REQUEST:
|
||||
tb_dbg(tb, "%llx: received XDomain properties request\n", route);
|
||||
if (xd) {
|
||||
ret = tb_xdp_properties_response(tb, ctl, xd, sequence,
|
||||
(const struct tb_xdp_properties *)pkg);
|
||||
@ -615,6 +747,9 @@ static void tb_xdp_handle_request(struct work_struct *work)
|
||||
break;
|
||||
|
||||
case PROPERTIES_CHANGED_REQUEST:
|
||||
tb_dbg(tb, "%llx: received XDomain properties changed request\n",
|
||||
route);
|
||||
|
||||
ret = tb_xdp_properties_changed_response(ctl, route, sequence);
|
||||
|
||||
/*
|
||||
@ -622,18 +757,51 @@ static void tb_xdp_handle_request(struct work_struct *work)
|
||||
* the xdomain related to this connection as well in
|
||||
* case there is a change in services it offers.
|
||||
*/
|
||||
if (xd && device_is_registered(&xd->dev)) {
|
||||
queue_delayed_work(tb->wq, &xd->get_properties_work,
|
||||
msecs_to_jiffies(50));
|
||||
}
|
||||
if (xd && device_is_registered(&xd->dev))
|
||||
queue_delayed_work(tb->wq, &xd->state_work,
|
||||
msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
|
||||
break;
|
||||
|
||||
case UUID_REQUEST_OLD:
|
||||
case UUID_REQUEST:
|
||||
tb_dbg(tb, "%llx: received XDomain UUID request\n", route);
|
||||
ret = tb_xdp_uuid_response(ctl, route, sequence, uuid);
|
||||
break;
|
||||
|
||||
case LINK_STATE_STATUS_REQUEST:
|
||||
tb_dbg(tb, "%llx: received XDomain link state status request\n",
|
||||
route);
|
||||
|
||||
if (xd) {
|
||||
ret = tb_xdp_link_state_status_response(tb, ctl, xd,
|
||||
sequence);
|
||||
} else {
|
||||
tb_xdp_error_response(ctl, route, sequence,
|
||||
ERROR_NOT_READY);
|
||||
}
|
||||
break;
|
||||
|
||||
case LINK_STATE_CHANGE_REQUEST:
|
||||
tb_dbg(tb, "%llx: received XDomain link state change request\n",
|
||||
route);
|
||||
|
||||
if (xd && xd->state == XDOMAIN_STATE_BONDING_UUID_HIGH) {
|
||||
const struct tb_xdp_link_state_change *lsc =
|
||||
(const struct tb_xdp_link_state_change *)pkg;
|
||||
|
||||
ret = tb_xdp_link_state_change_response(ctl, route,
|
||||
sequence, 0);
|
||||
xd->target_link_width = lsc->tlw;
|
||||
queue_delayed_work(tb->wq, &xd->state_work,
|
||||
msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
|
||||
} else {
|
||||
tb_xdp_error_response(ctl, route, sequence,
|
||||
ERROR_NOT_READY);
|
||||
}
|
||||
break;
|
||||
|
||||
default:
|
||||
tb_dbg(tb, "%llx: unknown XDomain request %#x\n", route, pkg->type);
|
||||
tb_xdp_error_response(ctl, route, sequence,
|
||||
ERROR_NOT_SUPPORTED);
|
||||
break;
|
||||
@ -1000,32 +1168,38 @@ static int tb_xdomain_update_link_attributes(struct tb_xdomain *xd)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void tb_xdomain_get_uuid(struct work_struct *work)
|
||||
static int tb_xdomain_get_uuid(struct tb_xdomain *xd)
|
||||
{
|
||||
struct tb_xdomain *xd = container_of(work, typeof(*xd),
|
||||
get_uuid_work.work);
|
||||
struct tb *tb = xd->tb;
|
||||
uuid_t uuid;
|
||||
u64 route;
|
||||
int ret;
|
||||
|
||||
dev_dbg(&xd->dev, "requesting remote UUID\n");
|
||||
|
||||
ret = tb_xdp_uuid_request(tb->ctl, xd->route, xd->uuid_retries, &uuid);
|
||||
ret = tb_xdp_uuid_request(tb->ctl, xd->route, xd->state_retries, &uuid,
|
||||
&route);
|
||||
if (ret < 0) {
|
||||
if (xd->uuid_retries-- > 0) {
|
||||
if (xd->state_retries-- > 0) {
|
||||
dev_dbg(&xd->dev, "failed to request UUID, retrying\n");
|
||||
queue_delayed_work(xd->tb->wq, &xd->get_uuid_work,
|
||||
msecs_to_jiffies(100));
|
||||
return -EAGAIN;
|
||||
} else {
|
||||
dev_dbg(&xd->dev, "failed to read remote UUID\n");
|
||||
}
|
||||
return;
|
||||
return ret;
|
||||
}
|
||||
|
||||
dev_dbg(&xd->dev, "got remote UUID %pUb\n", &uuid);
|
||||
|
||||
if (uuid_equal(&uuid, xd->local_uuid))
|
||||
dev_dbg(&xd->dev, "intra-domain loop detected\n");
|
||||
if (uuid_equal(&uuid, xd->local_uuid)) {
|
||||
if (route == xd->route)
|
||||
dev_dbg(&xd->dev, "loop back detected\n");
|
||||
else
|
||||
dev_dbg(&xd->dev, "intra-domain loop detected\n");
|
||||
|
||||
/* Don't bond lanes automatically for loops */
|
||||
xd->bonding_possible = false;
|
||||
}
|
||||
|
||||
/*
|
||||
* If the UUID is different, there is another domain connected
|
||||
@ -1035,27 +1209,152 @@ static void tb_xdomain_get_uuid(struct work_struct *work)
|
||||
if (xd->remote_uuid && !uuid_equal(&uuid, xd->remote_uuid)) {
|
||||
dev_dbg(&xd->dev, "remote UUID is different, unplugging\n");
|
||||
xd->is_unplugged = true;
|
||||
return;
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
/* First time fill in the missing UUID */
|
||||
if (!xd->remote_uuid) {
|
||||
xd->remote_uuid = kmemdup(&uuid, sizeof(uuid_t), GFP_KERNEL);
|
||||
if (!xd->remote_uuid)
|
||||
return;
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/* Now we can start the normal properties exchange */
|
||||
queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
|
||||
msecs_to_jiffies(100));
|
||||
queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
|
||||
msecs_to_jiffies(1000));
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void tb_xdomain_get_properties(struct work_struct *work)
|
||||
static int tb_xdomain_get_link_status(struct tb_xdomain *xd)
|
||||
{
|
||||
struct tb *tb = xd->tb;
|
||||
u8 slw, tlw, sls, tls;
|
||||
int ret;
|
||||
|
||||
dev_dbg(&xd->dev, "sending link state status request to %pUb\n",
|
||||
xd->remote_uuid);
|
||||
|
||||
ret = tb_xdp_link_state_status_request(tb->ctl, xd->route,
|
||||
xd->state_retries, &slw, &tlw, &sls,
|
||||
&tls);
|
||||
if (ret) {
|
||||
if (ret != -EOPNOTSUPP && xd->state_retries-- > 0) {
|
||||
dev_dbg(&xd->dev,
|
||||
"failed to request remote link status, retrying\n");
|
||||
return -EAGAIN;
|
||||
}
|
||||
dev_dbg(&xd->dev, "failed to receive remote link status\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
dev_dbg(&xd->dev, "remote link supports width %#x speed %#x\n", slw, sls);
|
||||
|
||||
if (slw < LANE_ADP_CS_0_SUPPORTED_WIDTH_DUAL) {
|
||||
dev_dbg(&xd->dev, "remote adapter is single lane only\n");
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int tb_xdomain_link_state_change(struct tb_xdomain *xd,
|
||||
unsigned int width)
|
||||
{
|
||||
struct tb_switch *sw = tb_to_switch(xd->dev.parent);
|
||||
struct tb_port *port = tb_port_at(xd->route, sw);
|
||||
struct tb *tb = xd->tb;
|
||||
u8 tlw, tls;
|
||||
u32 val;
|
||||
int ret;
|
||||
|
||||
if (width == 2)
|
||||
tlw = LANE_ADP_CS_1_TARGET_WIDTH_DUAL;
|
||||
else if (width == 1)
|
||||
tlw = LANE_ADP_CS_1_TARGET_WIDTH_SINGLE;
|
||||
else
|
||||
return -EINVAL;
|
||||
|
||||
/* Use the current target speed */
|
||||
ret = tb_port_read(port, &val, TB_CFG_PORT, port->cap_phy + LANE_ADP_CS_1, 1);
|
||||
if (ret)
|
||||
return ret;
|
||||
tls = val & LANE_ADP_CS_1_TARGET_SPEED_MASK;
|
||||
|
||||
dev_dbg(&xd->dev, "sending link state change request with width %#x speed %#x\n",
|
||||
tlw, tls);
|
||||
|
||||
ret = tb_xdp_link_state_change_request(tb->ctl, xd->route,
|
||||
xd->state_retries, tlw, tls);
|
||||
if (ret) {
|
||||
if (ret != -EOPNOTSUPP && xd->state_retries-- > 0) {
|
||||
dev_dbg(&xd->dev,
|
||||
"failed to change remote link state, retrying\n");
|
||||
return -EAGAIN;
|
||||
}
|
||||
dev_err(&xd->dev, "failed request link state change, aborting\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
dev_dbg(&xd->dev, "received link state change response\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int tb_xdomain_bond_lanes_uuid_high(struct tb_xdomain *xd)
|
||||
{
|
||||
struct tb_port *port;
|
||||
int ret, width;
|
||||
|
||||
if (xd->target_link_width == LANE_ADP_CS_1_TARGET_WIDTH_SINGLE) {
|
||||
width = 1;
|
||||
} else if (xd->target_link_width == LANE_ADP_CS_1_TARGET_WIDTH_DUAL) {
|
||||
width = 2;
|
||||
} else {
|
||||
if (xd->state_retries-- > 0) {
|
||||
dev_dbg(&xd->dev,
|
||||
"link state change request not received yet, retrying\n");
|
||||
return -EAGAIN;
|
||||
}
|
||||
dev_dbg(&xd->dev, "timeout waiting for link change request\n");
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
|
||||
port = tb_port_at(xd->route, tb_xdomain_parent(xd));
|
||||
|
||||
/*
|
||||
* We can't use tb_xdomain_lane_bonding_enable() here because it
|
||||
* is the other side that initiates lane bonding. So here we
|
||||
* just set the width to both lane adapters and wait for the
|
||||
* link to transition bonded.
|
||||
*/
|
||||
ret = tb_port_set_link_width(port->dual_link_port, width);
|
||||
if (ret) {
|
||||
tb_port_warn(port->dual_link_port,
|
||||
"failed to set link width to %d\n", width);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = tb_port_set_link_width(port, width);
|
||||
if (ret) {
|
||||
tb_port_warn(port, "failed to set link width to %d\n", width);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = tb_port_wait_for_link_width(port, width, XDOMAIN_BONDING_TIMEOUT);
|
||||
if (ret) {
|
||||
dev_warn(&xd->dev, "error waiting for link width to become %d\n",
|
||||
width);
|
||||
return ret;
|
||||
}
|
||||
|
||||
port->bonded = width == 2;
|
||||
port->dual_link_port->bonded = width == 2;
|
||||
|
||||
tb_port_update_credits(port);
|
||||
tb_xdomain_update_link_attributes(xd);
|
||||
|
||||
dev_dbg(&xd->dev, "lane bonding %sabled\n", width == 2 ? "en" : "dis");
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int tb_xdomain_get_properties(struct tb_xdomain *xd)
|
||||
{
|
||||
struct tb_xdomain *xd = container_of(work, typeof(*xd),
|
||||
get_properties_work.work);
|
||||
struct tb_property_dir *dir;
|
||||
struct tb *tb = xd->tb;
|
||||
bool update = false;
|
||||
@ -1066,34 +1365,35 @@ static void tb_xdomain_get_properties(struct work_struct *work)
|
||||
dev_dbg(&xd->dev, "requesting remote properties\n");
|
||||
|
||||
ret = tb_xdp_properties_request(tb->ctl, xd->route, xd->local_uuid,
|
||||
xd->remote_uuid, xd->properties_retries,
|
||||
xd->remote_uuid, xd->state_retries,
|
||||
&block, &gen);
|
||||
if (ret < 0) {
|
||||
if (xd->properties_retries-- > 0) {
|
||||
if (xd->state_retries-- > 0) {
|
||||
dev_dbg(&xd->dev,
|
||||
"failed to request remote properties, retrying\n");
|
||||
queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
|
||||
msecs_to_jiffies(1000));
|
||||
return -EAGAIN;
|
||||
} else {
|
||||
/* Give up now */
|
||||
dev_err(&xd->dev,
|
||||
"failed read XDomain properties from %pUb\n",
|
||||
xd->remote_uuid);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES;
|
||||
return ret;
|
||||
}
|
||||
|
||||
mutex_lock(&xd->lock);
|
||||
|
||||
/* Only accept newer generation properties */
|
||||
if (xd->remote_properties && gen <= xd->remote_property_block_gen)
|
||||
if (xd->remote_properties && gen <= xd->remote_property_block_gen) {
|
||||
ret = 0;
|
||||
goto err_free_block;
|
||||
}
|
||||
|
||||
dir = tb_property_parse_dir(block, ret);
|
||||
if (!dir) {
|
||||
dev_err(&xd->dev, "failed to parse XDomain properties\n");
|
||||
ret = -ENOMEM;
|
||||
goto err_free_block;
|
||||
}
|
||||
|
||||
@ -1124,9 +1424,16 @@ static void tb_xdomain_get_properties(struct work_struct *work)
|
||||
* registered, we notify the userspace that it has changed.
|
||||
*/
|
||||
if (!update) {
|
||||
struct tb_port *port;
|
||||
|
||||
/* Now disable lane 1 if bonding was not enabled */
|
||||
port = tb_port_at(xd->route, tb_xdomain_parent(xd));
|
||||
if (!port->bonded)
|
||||
tb_port_disable(port->dual_link_port);
|
||||
|
||||
if (device_add(&xd->dev)) {
|
||||
dev_err(&xd->dev, "failed to add XDomain device\n");
|
||||
return;
|
||||
return -ENODEV;
|
||||
}
|
||||
dev_info(&xd->dev, "new host found, vendor=%#x device=%#x\n",
|
||||
xd->vendor, xd->device);
|
||||
@ -1138,13 +1445,193 @@ static void tb_xdomain_get_properties(struct work_struct *work)
|
||||
}
|
||||
|
||||
enumerate_services(xd);
|
||||
return;
|
||||
return 0;
|
||||
|
||||
err_free_dir:
|
||||
tb_property_free_dir(dir);
|
||||
err_free_block:
|
||||
kfree(block);
|
||||
mutex_unlock(&xd->lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void tb_xdomain_queue_uuid(struct tb_xdomain *xd)
|
||||
{
|
||||
xd->state = XDOMAIN_STATE_UUID;
|
||||
xd->state_retries = XDOMAIN_RETRIES;
|
||||
queue_delayed_work(xd->tb->wq, &xd->state_work,
|
||||
msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
|
||||
}
|
||||
|
||||
static void tb_xdomain_queue_link_status(struct tb_xdomain *xd)
|
||||
{
|
||||
xd->state = XDOMAIN_STATE_LINK_STATUS;
|
||||
xd->state_retries = XDOMAIN_RETRIES;
|
||||
queue_delayed_work(xd->tb->wq, &xd->state_work,
|
||||
msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
|
||||
}
|
||||
|
||||
static void tb_xdomain_queue_link_status2(struct tb_xdomain *xd)
|
||||
{
|
||||
xd->state = XDOMAIN_STATE_LINK_STATUS2;
|
||||
xd->state_retries = XDOMAIN_RETRIES;
|
||||
queue_delayed_work(xd->tb->wq, &xd->state_work,
|
||||
msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
|
||||
}
|
||||
|
||||
static void tb_xdomain_queue_bonding(struct tb_xdomain *xd)
|
||||
{
|
||||
if (memcmp(xd->local_uuid, xd->remote_uuid, UUID_SIZE) > 0) {
|
||||
dev_dbg(&xd->dev, "we have higher UUID, other side bonds the lanes\n");
|
||||
xd->state = XDOMAIN_STATE_BONDING_UUID_HIGH;
|
||||
} else {
|
||||
dev_dbg(&xd->dev, "we have lower UUID, bonding lanes\n");
|
||||
xd->state = XDOMAIN_STATE_LINK_STATE_CHANGE;
|
||||
}
|
||||
|
||||
xd->state_retries = XDOMAIN_RETRIES;
|
||||
queue_delayed_work(xd->tb->wq, &xd->state_work,
|
||||
msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
|
||||
}
|
||||
|
||||
static void tb_xdomain_queue_bonding_uuid_low(struct tb_xdomain *xd)
|
||||
{
|
||||
xd->state = XDOMAIN_STATE_BONDING_UUID_LOW;
|
||||
xd->state_retries = XDOMAIN_RETRIES;
|
||||
queue_delayed_work(xd->tb->wq, &xd->state_work,
|
||||
msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
|
||||
}
|
||||
|
||||
static void tb_xdomain_queue_properties(struct tb_xdomain *xd)
|
||||
{
|
||||
xd->state = XDOMAIN_STATE_PROPERTIES;
|
||||
xd->state_retries = XDOMAIN_RETRIES;
|
||||
queue_delayed_work(xd->tb->wq, &xd->state_work,
|
||||
msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
|
||||
}
|
||||
|
||||
static void tb_xdomain_queue_properties_changed(struct tb_xdomain *xd)
|
||||
{
|
||||
xd->properties_changed_retries = XDOMAIN_RETRIES;
|
||||
queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
|
||||
msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
|
||||
}
|
||||
|
||||
static void tb_xdomain_state_work(struct work_struct *work)
|
||||
{
|
||||
struct tb_xdomain *xd = container_of(work, typeof(*xd), state_work.work);
|
||||
int ret, state = xd->state;
|
||||
|
||||
if (WARN_ON_ONCE(state < XDOMAIN_STATE_INIT ||
|
||||
state > XDOMAIN_STATE_ERROR))
|
||||
return;
|
||||
|
||||
dev_dbg(&xd->dev, "running state %s\n", state_names[state]);
|
||||
|
||||
switch (state) {
|
||||
case XDOMAIN_STATE_INIT:
|
||||
if (xd->needs_uuid) {
|
||||
tb_xdomain_queue_uuid(xd);
|
||||
} else {
|
||||
tb_xdomain_queue_properties_changed(xd);
|
||||
tb_xdomain_queue_properties(xd);
|
||||
}
|
||||
break;
|
||||
|
||||
case XDOMAIN_STATE_UUID:
|
||||
ret = tb_xdomain_get_uuid(xd);
|
||||
if (ret) {
|
||||
if (ret == -EAGAIN)
|
||||
goto retry_state;
|
||||
xd->state = XDOMAIN_STATE_ERROR;
|
||||
} else {
|
||||
tb_xdomain_queue_properties_changed(xd);
|
||||
if (xd->bonding_possible)
|
||||
tb_xdomain_queue_link_status(xd);
|
||||
else
|
||||
tb_xdomain_queue_properties(xd);
|
||||
}
|
||||
break;
|
||||
|
||||
case XDOMAIN_STATE_LINK_STATUS:
|
||||
ret = tb_xdomain_get_link_status(xd);
|
||||
if (ret) {
|
||||
if (ret == -EAGAIN)
|
||||
goto retry_state;
|
||||
|
||||
/*
|
||||
* If any of the lane bonding states fail we skip
|
||||
* bonding completely and try to continue from
|
||||
* reading properties.
|
||||
*/
|
||||
tb_xdomain_queue_properties(xd);
|
||||
} else {
|
||||
tb_xdomain_queue_bonding(xd);
|
||||
}
|
||||
break;
|
||||
|
||||
case XDOMAIN_STATE_LINK_STATE_CHANGE:
|
||||
ret = tb_xdomain_link_state_change(xd, 2);
|
||||
if (ret) {
|
||||
if (ret == -EAGAIN)
|
||||
goto retry_state;
|
||||
tb_xdomain_queue_properties(xd);
|
||||
} else {
|
||||
tb_xdomain_queue_link_status2(xd);
|
||||
}
|
||||
break;
|
||||
|
||||
case XDOMAIN_STATE_LINK_STATUS2:
|
||||
ret = tb_xdomain_get_link_status(xd);
|
||||
if (ret) {
|
||||
if (ret == -EAGAIN)
|
||||
goto retry_state;
|
||||
tb_xdomain_queue_properties(xd);
|
||||
} else {
|
||||
tb_xdomain_queue_bonding_uuid_low(xd);
|
||||
}
|
||||
break;
|
||||
|
||||
case XDOMAIN_STATE_BONDING_UUID_LOW:
|
||||
tb_xdomain_lane_bonding_enable(xd);
|
||||
tb_xdomain_queue_properties(xd);
|
||||
break;
|
||||
|
||||
case XDOMAIN_STATE_BONDING_UUID_HIGH:
|
||||
if (tb_xdomain_bond_lanes_uuid_high(xd) == -EAGAIN)
|
||||
goto retry_state;
|
||||
tb_xdomain_queue_properties(xd);
|
||||
break;
|
||||
|
||||
case XDOMAIN_STATE_PROPERTIES:
|
||||
ret = tb_xdomain_get_properties(xd);
|
||||
if (ret) {
|
||||
if (ret == -EAGAIN)
|
||||
goto retry_state;
|
||||
xd->state = XDOMAIN_STATE_ERROR;
|
||||
} else {
|
||||
xd->state = XDOMAIN_STATE_ENUMERATED;
|
||||
}
|
||||
break;
|
||||
|
||||
case XDOMAIN_STATE_ENUMERATED:
|
||||
tb_xdomain_queue_properties(xd);
|
||||
break;
|
||||
|
||||
case XDOMAIN_STATE_ERROR:
|
||||
break;
|
||||
|
||||
default:
|
||||
dev_warn(&xd->dev, "unexpected state %d\n", state);
|
||||
break;
|
||||
}
|
||||
|
||||
return;
|
||||
|
||||
retry_state:
|
||||
queue_delayed_work(xd->tb->wq, &xd->state_work,
|
||||
msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
|
||||
}
|
||||
|
||||
static void tb_xdomain_properties_changed(struct work_struct *work)
|
||||
@ -1163,13 +1650,13 @@ static void tb_xdomain_properties_changed(struct work_struct *work)
|
||||
"failed to send properties changed notification, retrying\n");
|
||||
queue_delayed_work(xd->tb->wq,
|
||||
&xd->properties_changed_work,
|
||||
msecs_to_jiffies(1000));
|
||||
msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
|
||||
}
|
||||
dev_err(&xd->dev, "failed to send properties changed notification\n");
|
||||
return;
|
||||
}
|
||||
|
||||
xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES;
|
||||
xd->properties_changed_retries = XDOMAIN_RETRIES;
|
||||
}
|
||||
|
||||
static ssize_t device_show(struct device *dev, struct device_attribute *attr,
|
||||
@ -1304,31 +1791,17 @@ static void tb_xdomain_release(struct device *dev)
|
||||
|
||||
static void start_handshake(struct tb_xdomain *xd)
|
||||
{
|
||||
xd->uuid_retries = XDOMAIN_UUID_RETRIES;
|
||||
xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES;
|
||||
xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES;
|
||||
|
||||
if (xd->needs_uuid) {
|
||||
queue_delayed_work(xd->tb->wq, &xd->get_uuid_work,
|
||||
msecs_to_jiffies(100));
|
||||
} else {
|
||||
/* Start exchanging properties with the other host */
|
||||
queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
|
||||
msecs_to_jiffies(100));
|
||||
queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
|
||||
msecs_to_jiffies(1000));
|
||||
}
|
||||
xd->state = XDOMAIN_STATE_INIT;
|
||||
queue_delayed_work(xd->tb->wq, &xd->state_work,
|
||||
msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
|
||||
}
|
||||
|
||||
static void stop_handshake(struct tb_xdomain *xd)
|
||||
{
|
||||
xd->uuid_retries = 0;
|
||||
xd->properties_retries = 0;
|
||||
xd->properties_changed_retries = 0;
|
||||
|
||||
cancel_delayed_work_sync(&xd->get_uuid_work);
|
||||
cancel_delayed_work_sync(&xd->get_properties_work);
|
||||
cancel_delayed_work_sync(&xd->properties_changed_work);
|
||||
cancel_delayed_work_sync(&xd->state_work);
|
||||
xd->properties_changed_retries = 0;
|
||||
xd->state_retries = 0;
|
||||
}
|
||||
|
||||
static int __maybe_unused tb_xdomain_suspend(struct device *dev)
|
||||
@ -1389,8 +1862,7 @@ struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
|
||||
ida_init(&xd->in_hopids);
|
||||
ida_init(&xd->out_hopids);
|
||||
mutex_init(&xd->lock);
|
||||
INIT_DELAYED_WORK(&xd->get_uuid_work, tb_xdomain_get_uuid);
|
||||
INIT_DELAYED_WORK(&xd->get_properties_work, tb_xdomain_get_properties);
|
||||
INIT_DELAYED_WORK(&xd->state_work, tb_xdomain_state_work);
|
||||
INIT_DELAYED_WORK(&xd->properties_changed_work,
|
||||
tb_xdomain_properties_changed);
|
||||
|
||||
@ -1405,6 +1877,7 @@ struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
|
||||
goto err_free_local_uuid;
|
||||
} else {
|
||||
xd->needs_uuid = true;
|
||||
xd->bonding_possible = !!down->dual_link_port;
|
||||
}
|
||||
|
||||
device_initialize(&xd->dev);
|
||||
@ -1523,9 +1996,9 @@ int tb_xdomain_lane_bonding_enable(struct tb_xdomain *xd)
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = tb_port_wait_for_link_width(port, 2, 100);
|
||||
ret = tb_port_wait_for_link_width(port, 2, XDOMAIN_BONDING_TIMEOUT);
|
||||
if (ret) {
|
||||
tb_port_warn(port, "timeout enabling lane bonding\n");
|
||||
tb_port_warn(port, "failed to enable lane bonding\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -198,15 +198,15 @@ void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir);
|
||||
* @local_property_block_len: Length of the @local_property_block in dwords
|
||||
* @remote_properties: Properties exported by the remote domain
|
||||
* @remote_property_block_gen: Generation of @remote_properties
|
||||
* @get_uuid_work: Work used to retrieve @remote_uuid
|
||||
* @uuid_retries: Number of times left @remote_uuid is requested before
|
||||
* giving up
|
||||
* @get_properties_work: Work used to get remote domain properties
|
||||
* @properties_retries: Number of times left to read properties
|
||||
* @state: Next XDomain discovery state to run
|
||||
* @state_work: Work used to run the next state
|
||||
* @state_retries: Number of retries remain for the state
|
||||
* @properties_changed_work: Work used to notify the remote domain that
|
||||
* our properties have changed
|
||||
* @properties_changed_retries: Number of times left to send properties
|
||||
* changed notification
|
||||
* @bonding_possible: True if lane bonding is possible on local side
|
||||
* @target_link_width: Target link width from the remote host
|
||||
* @link: Root switch link the remote domain is connected (ICM only)
|
||||
* @depth: Depth in the chain the remote domain is connected (ICM only)
|
||||
*
|
||||
@ -244,12 +244,13 @@ struct tb_xdomain {
|
||||
u32 local_property_block_len;
|
||||
struct tb_property_dir *remote_properties;
|
||||
u32 remote_property_block_gen;
|
||||
struct delayed_work get_uuid_work;
|
||||
int uuid_retries;
|
||||
struct delayed_work get_properties_work;
|
||||
int properties_retries;
|
||||
int state;
|
||||
struct delayed_work state_work;
|
||||
int state_retries;
|
||||
struct delayed_work properties_changed_work;
|
||||
int properties_changed_retries;
|
||||
bool bonding_possible;
|
||||
u8 target_link_width;
|
||||
u8 link;
|
||||
u8 depth;
|
||||
};
|
||||
|