mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
commit b99561c513

Merge tag 'iwlwifi-next-for-kalle-2019-04-18-2' of git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-next

Third batch of patches intended for v5.2

* Bump the 20000-series FW API version supported;
* Work on the new debugging infra continues;
* One clean-up to prevent a bogus warning with clang;
* A small cleanup in the PCI ID list;
* Work on new hardware continues;
* RTT confidence indication support for FTM;
* An improvement in HE rate-scaling;

-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEEF3LNfgb2BPWm68smoUecoho8xfoFAly5eWAACgkQoUecoho8
xfqEIRAAlMWOsctlnqlA2oxuf4V/tAvI3rHPiPHbdrBGCM13muIihP/FXXFgDQTY
k55o2Ugg5RTwitdhL3jU//meF+a755FvEqAD8BK1SfySid7frWvNMXIK8kWWp6Cw
VzwG3gfZ0hV21P8gYdfg+HLwfiBHDKNGUAj08pA3jXAxA+tG6VrXNNGuHe5Jy3pK
JxGNSQUtdHZPx6UVI5Em3Al+bfLmhXRlDDlZQl1amPY5+zHEvBJLSi9Mmf8UShle
YDnFDUA3co6BDvuOaOM7j5buvb7sj+pEMQMTfhcnLtqo5cSF1eEbYHw0aZKYG9rl
Ueej9moQoTNZWoy9DJVGrMWZd86XEmplgCEfrl7ricGVuOq73GWneeU7uuUnnMZr
5SiS6o54WalYE0sY8RfhDWRWgAdN1chGmtrPsq+iIfOoEfnTzIqfKW4sG0MqIVHP
UNyx8aBM0ebjfa1s6xSVBwEEZYCCdOHl1O56UVcvnHzB+c82xj2nadO7ebOnBKBf
CQnUKCNWbE1HbLiAJdGWYHTWGdMkswDMYl8Zqf0Q2KZ2uCpmWHbjgSyVqkkUnYR1
5vF+2kMJfNhjw5aBnnP4W5ZQSjIpp+Rtvnui9WiSYinXbfZ+b3EfJwx/2hKKq7d2
y5lCCeX5hGKVp7P5fIOuN7shmvNrdyywawcRWwtlWmVpwHtkUKQ=
=JJSw
-----END PGP SIGNATURE-----
@@ -56,7 +56,7 @@
#include "iwl-config.h"

/* Highest firmware API version supported */
#define IWL_22000_UCODE_API_MAX 46
#define IWL_22000_UCODE_API_MAX 47

/* Lowest firmware API version supported */
#define IWL_22000_UCODE_API_MIN 39

@@ -80,7 +80,6 @@
#define IWL_22000_QU_B_HR_B_FW_PRE "iwlwifi-Qu-b0-hr-b0-"
#define IWL_22000_HR_B_FW_PRE "iwlwifi-QuQnj-b0-hr-b0-"
#define IWL_22000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-"
#define IWL_22000_SU_Z0_FW_PRE "iwlwifi-su-z0-"
#define IWL_QU_B_JF_B_FW_PRE "iwlwifi-Qu-b0-jf-b0-"
#define IWL_QUZ_A_HR_B_FW_PRE "iwlwifi-QuZ-a0-hr-b0-"
#define IWL_QNJ_B_JF_B_FW_PRE "iwlwifi-QuQnj-b0-jf-b0-"

@@ -105,8 +104,6 @@
IWL_22000_HR_B_FW_PRE __stringify(api) ".ucode"
#define IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(api) \
IWL_22000_HR_A0_FW_PRE __stringify(api) ".ucode"
#define IWL_22000_SU_Z0_MODULE_FIRMWARE(api) \
IWL_22000_SU_Z0_FW_PRE __stringify(api) ".ucode"
#define IWL_QUZ_A_HR_B_MODULE_FIRMWARE(api) \
IWL_QUZ_A_HR_B_FW_PRE __stringify(api) ".ucode"
#define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \

@@ -420,19 +417,6 @@ const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0 = {
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};

const struct iwl_cfg iwl22560_2ax_cfg_su_cdb = {
.name = "Intel(R) Dual Band Wireless AX 22560",
.fw_name_pre = IWL_22000_SU_Z0_FW_PRE,
IWL_DEVICE_22560,
.cdb = true,
/*
* This device doesn't support receiving BlockAck with a large bitmap
* so we need to restrict the size of transmitted aggregation to the
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};

const struct iwl_cfg iwlax210_2ax_cfg_so_jf_a0 = {
.name = "Intel(R) Wireless-AC 9560 160MHz",
.fw_name_pre = IWL_22000_SO_A_JF_B_FW_PRE,

@@ -471,7 +455,6 @@ MODULE_FIRMWARE(IWL_22000_HR_A_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_HR_B_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_SU_Z0_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QU_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QUZ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QNJ_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
@@ -675,7 +675,7 @@ struct iwl_tof_range_rsp_ap_entry_ntfy_v3 {
} __packed; /* LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_3 */

/**
* struct iwl_tof_range_rsp_ap_entry_ntfy - AP parameters (response)
* struct iwl_tof_range_rsp_ap_entry_ntfy_v4 - AP parameters (response)
* @bssid: BSSID of the AP
* @measure_status: current APs measurement status, one of
* &enum iwl_tof_entry_status.

@@ -705,7 +705,7 @@ struct iwl_tof_range_rsp_ap_entry_ntfy_v3 {
* @papd_calib_output: The result of the tof papd calibration that was injected
* into the algorithm.
*/
struct iwl_tof_range_rsp_ap_entry_ntfy {
struct iwl_tof_range_rsp_ap_entry_ntfy_v4 {
u8 bssid[ETH_ALEN];
u8 measure_status;
u8 measure_bw;

@@ -727,6 +727,63 @@ struct iwl_tof_range_rsp_ap_entry_ntfy {
__le32 papd_calib_output;
} __packed; /* LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_4 */

/**
* struct iwl_tof_range_rsp_ap_entry_ntfy - AP parameters (response)
* @bssid: BSSID of the AP
* @measure_status: current APs measurement status, one of
* &enum iwl_tof_entry_status.
* @measure_bw: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz
* @rtt: The Round Trip Time that took for the last measurement for
* current AP [pSec]
* @rtt_variance: The Variance of the RTT values measured for current AP
* @rtt_spread: The Difference between the maximum and the minimum RTT
* values measured for current AP in the current session [pSec]
* @rssi: RSSI as uploaded in the Channel Estimation notification
* @rssi_spread: The Difference between the maximum and the minimum RSSI values
* measured for current AP in the current session
* @last_burst: 1 if no more FTM sessions are scheduled for this responder
* @refusal_period: refusal period in case of
* @IWL_TOF_ENTRY_RESPONDER_CANNOT_COLABORATE [sec]
* @timestamp: The GP2 Clock [usec] where Channel Estimation notification was
* uploaded by the LMAC
* @start_tsf: measurement start time in TSF of the mac specified in the range
* request
* @rx_rate_n_flags: rate and flags of the last FTM frame received from this
* responder
* @tx_rate_n_flags: rate and flags of the last ack sent to this responder
* @t2t3_initiator: as calculated from the algo in the initiator
* @t1t4_responder: as calculated from the algo in the responder
* @common_calib: Calib val that was used in for this AP measurement
* @specific_calib: val that was used in for this AP measurement
* @papd_calib_output: The result of the tof papd calibration that was injected
* into the algorithm.
* @rttConfidence: a value between 0 - 31 that represents the rtt accuracy.
* @reserved: for alignment
*/
struct iwl_tof_range_rsp_ap_entry_ntfy {
u8 bssid[ETH_ALEN];
u8 measure_status;
u8 measure_bw;
__le32 rtt;
__le32 rtt_variance;
__le32 rtt_spread;
s8 rssi;
u8 rssi_spread;
u8 last_burst;
u8 refusal_period;
__le32 timestamp;
__le32 start_tsf;
__le32 rx_rate_n_flags;
__le32 tx_rate_n_flags;
__le32 t2t3_initiator;
__le32 t1t4_responder;
__le16 common_calib;
__le16 specific_calib;
__le32 papd_calib_output;
u8 rttConfidence;
u8 reserved[3];
} __packed; /* LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_5 */

/**
* enum iwl_tof_response_status - tof response status
*

@@ -760,6 +817,22 @@ struct iwl_tof_range_rsp_ntfy_v5 {
struct iwl_tof_range_rsp_ap_entry_ntfy_v3 ap[IWL_MVM_TOF_MAX_APS];
} __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_5 */

/**
* struct iwl_tof_range_rsp_ntfy_v6 - ranging response notification
* @request_id: A Token ID of the corresponding Range request
* @num_of_aps: Number of APs results
* @last_report: 1 if no more FTM sessions are scheduled, 0 otherwise.
* @reserved: reserved
* @ap: per-AP data
*/
struct iwl_tof_range_rsp_ntfy_v6 {
u8 request_id;
u8 num_of_aps;
u8 last_report;
u8 reserved;
struct iwl_tof_range_rsp_ap_entry_ntfy_v4 ap[IWL_MVM_TOF_MAX_APS];
} __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_6 */

/**
* struct iwl_tof_range_rsp_ntfy - ranging response notification
* @request_id: A Token ID of the corresponding Range request

@@ -774,7 +847,7 @@ struct iwl_tof_range_rsp_ntfy {
u8 last_report;
u8 reserved;
struct iwl_tof_range_rsp_ap_entry_ntfy ap[IWL_MVM_TOF_MAX_APS];
} __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_6 */
} __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_7 */

#define IWL_MVM_TOF_MCSI_BUF_SIZE (245)
/**
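Editorial aside (not part of the patch): the new VER_5 AP entry above appends an rttConfidence byte in the 0-31 range plus alignment padding. As an illustration only, here is a minimal user-space C sketch that mirrors the packed layout and pulls rtt and rttConfidence out of a raw notification buffer; the struct name, helper and sample values are invented for this example, and the real driver simply accesses the struct in the RX packet (see the mvm hunks later in this commit).

/* sketch_rtt_confidence.c - illustrative only, not driver code */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <endian.h>

#define ETH_ALEN 6

/* Mirrors the field order of iwl_tof_range_rsp_ap_entry_ntfy (VER_5). */
struct tof_ap_entry_v5 {
	uint8_t  bssid[ETH_ALEN];
	uint8_t  measure_status;
	uint8_t  measure_bw;
	uint32_t rtt;               /* little-endian on the wire, [pSec] */
	uint32_t rtt_variance;
	uint32_t rtt_spread;
	int8_t   rssi;
	uint8_t  rssi_spread;
	uint8_t  last_burst;
	uint8_t  refusal_period;
	uint32_t timestamp;
	uint32_t start_tsf;
	uint32_t rx_rate_n_flags;
	uint32_t tx_rate_n_flags;
	uint32_t t2t3_initiator;
	uint32_t t1t4_responder;
	uint16_t common_calib;
	uint16_t specific_calib;
	uint32_t papd_calib_output;
	uint8_t  rttConfidence;     /* 0..31, higher means a more accurate RTT */
	uint8_t  reserved[3];
} __attribute__((packed));

/* Print RTT and the confidence value for each AP entry in a response buffer. */
static void dump_entries(const uint8_t *buf, size_t num_aps)
{
	for (size_t i = 0; i < num_aps; i++) {
		struct tof_ap_entry_v5 e;

		memcpy(&e, buf + i * sizeof(e), sizeof(e));
		printf("AP %zu: rtt=%u psec, confidence=%u/31\n",
		       i, le32toh(e.rtt), (unsigned)e.rttConfidence);
	}
}

int main(void)
{
	/* Fake notification payload with one entry, for demonstration only. */
	uint8_t buf[sizeof(struct tof_ap_entry_v5)] = { 0 };

	buf[offsetof(struct tof_ap_entry_v5, rttConfidence)] = 27;
	dump_entries(buf, 1);
	return 0;
}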
@@ -8,6 +8,7 @@
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as

@@ -30,6 +31,7 @@
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

@@ -133,6 +135,7 @@ enum iwl_tx_queue_cfg_actions {

#define IWL_DEFAULT_QUEUE_SIZE 256
#define IWL_MGMT_QUEUE_SIZE 16
#define IWL_CMD_QUEUE_SIZE 32
/**
* struct iwl_tx_queue_cfg_cmd - txq hw scheduler config command
* @sta_id: station id
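Editorial aside (not part of the patch): the IWL_CMD_QUEUE_SIZE and IWL_DEFAULT_QUEUE_SIZE defaults added here are combined elsewhere in this series with per-device minimums via max_t(), e.g. max_t(u32, IWL_CMD_QUEUE_SIZE, trans->cfg->min_txq_size). A small stand-alone C sketch of that selection rule follows; the device minimums below are placeholders, not values taken from any real iwl_cfg.

/* sketch_queue_size.c - illustrative only */
#include <stdint.h>
#include <stdio.h>

#define IWL_DEFAULT_QUEUE_SIZE 256
#define IWL_CMD_QUEUE_SIZE      32

/* Per-device minimums; the numbers below are placeholders for the example. */
struct cfg {
	uint32_t min_txq_size;
	uint32_t min_256_ba_txq_size;
};

static uint32_t max_u32(uint32_t a, uint32_t b) { return a > b ? a : b; }

int main(void)
{
	struct cfg dev = { .min_txq_size = 64, .min_256_ba_txq_size = 512 };

	/* Command queue: at least IWL_CMD_QUEUE_SIZE, larger if the device demands it. */
	uint32_t cmdq = max_u32(IWL_CMD_QUEUE_SIZE, dev.min_txq_size);
	/* Data queue: at least IWL_DEFAULT_QUEUE_SIZE, or the 256-frame-BA minimum. */
	uint32_t txq = max_u32(IWL_DEFAULT_QUEUE_SIZE, dev.min_256_ba_txq_size);

	printf("cmd queue slots: %u, data queue slots: %u\n", cmdq, txq);
	return 0;
}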
@@ -545,6 +545,7 @@ static const struct iwl_prph_range iwl_prph_dump_addr_22000[] = {
{ .start = 0x00a04590, .end = 0x00a04590 },
{ .start = 0x00a04598, .end = 0x00a04598 },
{ .start = 0x00a045c0, .end = 0x00a045f4 },
{ .start = 0x00a05c18, .end = 0x00a05c1c },
{ .start = 0x00a0c000, .end = 0x00a0c018 },
{ .start = 0x00a0c020, .end = 0x00a0c028 },
{ .start = 0x00a0c038, .end = 0x00a0c094 },

@@ -557,6 +558,12 @@ static const struct iwl_prph_range iwl_prph_dump_addr_22000[] = {
{ .start = 0x00a0c1b0, .end = 0x00a0c1b8 },
};

static const struct iwl_prph_range iwl_prph_dump_addr_ax210[] = {
{ .start = 0x00d03c00, .end = 0x00d03c64 },
{ .start = 0x00d05c18, .end = 0x00d05c1c },
{ .start = 0x00d0c000, .end = 0x00d0c174 },
};

static void iwl_read_prph_block(struct iwl_trans *trans, u32 start,
u32 len_bytes, __le32 *data)
{

@@ -675,7 +682,8 @@ static void iwl_fw_prph_handler(struct iwl_fw_runtime *fwrt, void *ptr,
u32 range_len;

if (fwrt->trans->cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
/* TODO */
range_len = ARRAY_SIZE(iwl_prph_dump_addr_ax210);
handler(fwrt, iwl_prph_dump_addr_ax210, range_len, ptr);
} else if (fwrt->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
range_len = ARRAY_SIZE(iwl_prph_dump_addr_22000);
handler(fwrt, iwl_prph_dump_addr_22000, range_len, ptr);

@@ -1685,6 +1693,9 @@ iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt,
!ops->fill_mem_hdr || !ops->fill_range))
return;

IWL_DEBUG_FW(fwrt, "WRT: collecting region: id=%d, type=%d\n",
le32_to_cpu(reg->region_id), type);

num_of_ranges = ops->get_num_of_ranges(fwrt, reg);

(*data)->type = cpu_to_le32(type | INI_DUMP_BIT);

@@ -1698,7 +1709,8 @@ iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt,

range = ops->fill_mem_hdr(fwrt, reg, header);
if (!range) {
IWL_ERR(fwrt, "Failed to fill region header: id=%d, type=%d\n",
IWL_ERR(fwrt,
"WRT: failed to fill region header: id=%d, type=%d\n",
le32_to_cpu(reg->region_id), type);
memset(*data, 0, le32_to_cpu((*data)->len));
return;

@@ -1708,7 +1720,8 @@ iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt,
int range_size = ops->fill_range(fwrt, reg, range, i);

if (range_size < 0) {
IWL_ERR(fwrt, "Failed to dump region: id=%d, type=%d\n",
IWL_ERR(fwrt,
"WRT: failed to dump region: id=%d, type=%d\n",
le32_to_cpu(reg->region_id), type);
memset(*data, 0, le32_to_cpu((*data)->len));
return;

@@ -1734,8 +1747,12 @@ static int iwl_fw_ini_get_trigger_len(struct iwl_fw_runtime *fwrt,
continue;

reg = fwrt->dump.active_regs[reg_id];
if (WARN(!reg, "Unassigned region %d\n", reg_id))
if (!reg) {
IWL_WARN(fwrt,
"WRT: unassigned region id %d, skipping\n",
reg_id);
continue;
}

switch (le32_to_cpu(reg->region_type)) {
case IWL_FW_INI_REGION_DEVICE_MEMORY:

@@ -2108,6 +2125,12 @@ int _iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt,
if (test_and_set_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status))
return -EBUSY;

if (!iwl_fw_ini_trigger_on(fwrt, id)) {
IWL_WARN(fwrt, "WRT: Trigger %d is not active, aborting dump\n",
id);
return -EINVAL;
}

active = &fwrt->dump.active_trigs[id];
delay = le32_to_cpu(active->trig->dump_delay);
occur = le32_to_cpu(active->trig->occurrences);

@@ -2117,14 +2140,17 @@ int _iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt,
active->trig->occurrences = cpu_to_le32(--occur);

if (le32_to_cpu(active->trig->force_restart)) {
IWL_WARN(fwrt, "Force restart: trigger %d fired.\n", id);
IWL_WARN(fwrt, "WRT: force restart: trigger %d fired.\n", id);
iwl_force_nmi(fwrt->trans);
return 0;
}

if (test_and_set_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status))
return -EBUSY;

fwrt->dump.ini_trig_id = id;

IWL_WARN(fwrt, "Collecting data: ini trigger %d fired.\n", id);
IWL_WARN(fwrt, "WRT: collecting data: ini trigger %d fired.\n", id);

schedule_delayed_work(&fwrt->dump.wk, usecs_to_jiffies(delay));

@@ -2262,12 +2288,12 @@ void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt)

iwl_fw_dbg_stop_recording(fwrt, &params);

IWL_DEBUG_INFO(fwrt, "WRT dump start\n");
IWL_DEBUG_FW_INFO(fwrt, "WRT: data collection start\n");
if (fwrt->trans->ini_valid)
iwl_fw_error_ini_dump(fwrt);
else
iwl_fw_error_dump(fwrt);
IWL_DEBUG_INFO(fwrt, "WRT dump done\n");
IWL_DEBUG_FW_INFO(fwrt, "WRT: data collection done\n");

/* start recording again if the firmware is not crashed */
if (!test_bit(STATUS_FW_ERROR, &fwrt->trans->status) &&

@@ -2337,12 +2363,14 @@ iwl_fw_dbg_buffer_allocation(struct iwl_fw_runtime *fwrt, u32 size)
if (!virtual_addr)
IWL_ERR(fwrt, "Failed to allocate debug memory\n");

IWL_DEBUG_FW(trans,
"Allocated DRAM buffer[%d], size=0x%x\n",
trans->num_blocks, size);

trans->fw_mon[trans->num_blocks].block = virtual_addr;
trans->fw_mon[trans->num_blocks].physical = phys_addr;
trans->fw_mon[trans->num_blocks].size = size;
trans->num_blocks++;

IWL_DEBUG_FW(trans, "Allocated debug block of size %d\n", size);
}

static void iwl_fw_dbg_buffer_apply(struct iwl_fw_runtime *fwrt,

@@ -2365,11 +2393,15 @@ static void iwl_fw_dbg_buffer_apply(struct iwl_fw_runtime *fwrt,

if (buf_location == IWL_FW_INI_LOCATION_SRAM_PATH) {
if (!WARN(pnt != IWL_FW_INI_APPLY_EARLY,
"Invalid apply point %d for SMEM buffer allocation",
pnt))
"WRT: Invalid apply point %d for SMEM buffer allocation, aborting\n",
pnt)) {
IWL_DEBUG_FW(trans,
"WRT: applying SMEM buffer destination\n");

/* set sram monitor by enabling bit 7 */
iwl_set_bit(fwrt->trans, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_MONITOR_SRAM);
}
return;
}

@@ -2388,6 +2420,9 @@ static void iwl_fw_dbg_buffer_apply(struct iwl_fw_runtime *fwrt,
if (trans->num_blocks == 1)
return;

IWL_DEBUG_FW(trans,
"WRT: applying DRAM buffer[%d] destination\n", block_idx);

cmd->num_frags = cpu_to_le32(1);
cmd->fragments[0].address =
cpu_to_le64(trans->fw_mon[block_idx].physical);

@@ -2399,7 +2434,8 @@ static void iwl_fw_dbg_buffer_apply(struct iwl_fw_runtime *fwrt,
}

static void iwl_fw_dbg_send_hcmd(struct iwl_fw_runtime *fwrt,
struct iwl_ucode_tlv *tlv)
struct iwl_ucode_tlv *tlv,
bool ext)
{
struct iwl_fw_ini_hcmd_tlv *hcmd_tlv = (void *)&tlv->data[0];
struct iwl_fw_ini_hcmd *data = &hcmd_tlv->hcmd;

@@ -2415,6 +2451,10 @@ static void iwl_fw_dbg_send_hcmd(struct iwl_fw_runtime *fwrt,
if (le32_to_cpu(hcmd_tlv->domain) != IWL_FW_INI_DBG_DOMAIN_ALWAYS_ON)
return;

IWL_DEBUG_FW(fwrt,
"WRT: ext=%d. Sending host command id=0x%x, group=0x%x\n",
ext, data->id, data->group);

iwl_trans_send_cmd(fwrt->trans, &hcmd);
}

@@ -2431,17 +2471,20 @@ static void iwl_fw_dbg_update_regions(struct iwl_fw_runtime *fwrt,
u32 type = le32_to_cpu(reg->region_type);

if (WARN(id >= ARRAY_SIZE(fwrt->dump.active_regs),
"Invalid region id %d for apply point %d\n", id, pnt))
"WRT: ext=%d. Invalid region id %d for apply point %d\n",
ext, id, pnt))
break;

active = &fwrt->dump.active_regs[id];

if (*active)
IWL_WARN(fwrt->trans, "region TLV %d override\n", id);
IWL_WARN(fwrt->trans,
"WRT: ext=%d. Region id %d override\n",
ext, id);

IWL_DEBUG_FW(fwrt,
"%s: apply point %d, activating region ID %d\n",
__func__, pnt, id);
"WRT: ext=%d. Activating region id %d\n",
ext, id);

*active = reg;

@@ -2449,7 +2492,13 @@ static void iwl_fw_dbg_update_regions(struct iwl_fw_runtime *fwrt,
type == IWL_FW_INI_REGION_RXF)
iter += le32_to_cpu(reg->fifos.num_of_registers) *
sizeof(__le32);
else if (type != IWL_FW_INI_REGION_DRAM_BUFFER)
else if (type == IWL_FW_INI_REGION_DEVICE_MEMORY ||
type == IWL_FW_INI_REGION_PERIPHERY_MAC ||
type == IWL_FW_INI_REGION_PERIPHERY_PHY ||
type == IWL_FW_INI_REGION_PERIPHERY_AUX ||
type == IWL_FW_INI_REGION_INTERNAL_BUFFER ||
type == IWL_FW_INI_REGION_PAGING ||
type == IWL_FW_INI_REGION_CSR)
iter += le32_to_cpu(reg->internal.num_of_ranges) *
sizeof(__le32);

@@ -2468,7 +2517,8 @@ static int iwl_fw_dbg_trig_realloc(struct iwl_fw_runtime *fwrt,

ptr = krealloc(active->trig, size, GFP_KERNEL);
if (!ptr) {
IWL_ERR(fwrt, "Failed to allocate memory for trigger %d\n", id);
IWL_ERR(fwrt, "WRT: Failed to allocate memory for trigger %d\n",
id);
return -ENOMEM;
}
active->trig = ptr;

@@ -2492,7 +2542,9 @@ static void iwl_fw_dbg_update_triggers(struct iwl_fw_runtime *fwrt,
u32 trig_regs_size = le32_to_cpu(trig->num_regions) *
sizeof(__le32);

if (WARN_ON(id >= ARRAY_SIZE(fwrt->dump.active_trigs)))
if (WARN(id >= ARRAY_SIZE(fwrt->dump.active_trigs),
"WRT: ext=%d. Invalid trigger id %d for apply point %d\n",
ext, id, apply_point))
break;

active = &fwrt->dump.active_trigs[id];

@@ -2500,6 +2552,10 @@ static void iwl_fw_dbg_update_triggers(struct iwl_fw_runtime *fwrt,
if (!active->active) {
size_t trig_size = sizeof(*trig) + trig_regs_size;

IWL_DEBUG_FW(fwrt,
"WRT: ext=%d. Activating trigger %d\n",
ext, id);

if (iwl_fw_dbg_trig_realloc(fwrt, active, id,
trig_size))
goto next;

@@ -2518,8 +2574,16 @@ static void iwl_fw_dbg_update_triggers(struct iwl_fw_runtime *fwrt,
int mem_to_add = trig_regs_size;

if (region_override) {
IWL_DEBUG_FW(fwrt,
"WRT: ext=%d. Trigger %d regions override\n",
ext, id);

mem_to_add -= active_regs * sizeof(__le32);
} else {
IWL_DEBUG_FW(fwrt,
"WRT: ext=%d. Trigger %d regions appending\n",
ext, id);

offset += active_regs;
new_regs += active_regs;
}

@@ -2528,8 +2592,13 @@ static void iwl_fw_dbg_update_triggers(struct iwl_fw_runtime *fwrt,
active->size + mem_to_add))
goto next;

if (conf_override)
if (conf_override) {
IWL_DEBUG_FW(fwrt,
"WRT: ext=%d. Trigger %d configuration override\n",
ext, id);

memcpy(active->trig, trig, sizeof(*trig));
}

memcpy(active->trig->data + offset, trig->data,
trig_regs_size);

@@ -2570,11 +2639,11 @@ static void _iwl_fw_dbg_apply_point(struct iwl_fw_runtime *fwrt,
case IWL_UCODE_TLV_TYPE_HCMD:
if (pnt < IWL_FW_INI_APPLY_AFTER_ALIVE) {
IWL_ERR(fwrt,
"Invalid apply point %x for host command\n",
pnt);
"WRT: ext=%d. Invalid apply point %d for host command\n",
ext, pnt);
goto next;
}
iwl_fw_dbg_send_hcmd(fwrt, tlv);
iwl_fw_dbg_send_hcmd(fwrt, tlv, ext);
break;
case IWL_UCODE_TLV_TYPE_REGIONS:
iwl_fw_dbg_update_regions(fwrt, ini_tlv, ext, pnt);

@@ -2585,7 +2654,9 @@ static void _iwl_fw_dbg_apply_point(struct iwl_fw_runtime *fwrt,
case IWL_UCODE_TLV_TYPE_DEBUG_FLOW:
break;
default:
WARN_ONCE(1, "Invalid TLV %x for apply point\n", type);
WARN_ONCE(1,
"WRT: ext=%d. Invalid TLV 0x%x for apply point\n",
ext, type);
break;
}
next:

@@ -2599,6 +2670,8 @@ void iwl_fw_dbg_apply_point(struct iwl_fw_runtime *fwrt,
void *data = &fwrt->trans->apply_points[apply_point];
int i;

IWL_DEBUG_FW(fwrt, "WRT: enabling apply point %d\n", apply_point);

if (apply_point == IWL_FW_INI_APPLY_EARLY) {
for (i = 0; i < IWL_FW_INI_MAX_REGION_ID; i++)
fwrt->dump.active_regs[i] = NULL;
@@ -311,6 +311,7 @@ enum iwl_ucode_tlv_api {
IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ = (__force iwl_ucode_tlv_api_t)49,
IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS = (__force iwl_ucode_tlv_api_t)50,
IWL_UCODE_TLV_API_MBSSID_HE = (__force iwl_ucode_tlv_api_t)52,
IWL_UCODE_TLV_API_FTM_RTT_ACCURACY = (__force iwl_ucode_tlv_api_t)54,

NUM_IWL_UCODE_TLV_API
#ifdef __CHECKER__
@@ -578,7 +578,6 @@ extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0_f0;
extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0;
extern const struct iwl_cfg iwl9560_2ac_cfg_qnj_jf_b0;
extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0;
extern const struct iwl_cfg iwl22560_2ax_cfg_su_cdb;
extern const struct iwl_cfg iwlax210_2ax_cfg_so_jf_a0;
extern const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0;
extern const struct iwl_cfg iwlax210_2ax_cfg_so_gf_a0;
@@ -138,7 +138,7 @@ void iwl_alloc_dbg_tlv(struct iwl_trans *trans, size_t len, const u8 *data,
if (le32_to_cpu(hdr->tlv_version) != 1)
continue;

IWL_DEBUG_FW(trans, "Read TLV %x, apply point %d\n",
IWL_DEBUG_FW(trans, "WRT: read TLV 0x%x, apply point %d\n",
le32_to_cpu(tlv->type), apply);

if (WARN_ON(apply >= IWL_FW_INI_APPLY_NUM))
@@ -218,5 +218,7 @@ do { \
#define IWL_DEBUG_TPT(p, f, a...) IWL_DEBUG(p, IWL_DL_TPT, f, ## a)
#define IWL_DEBUG_RPM(p, f, a...) IWL_DEBUG(p, IWL_DL_RPM, f, ## a)
#define IWL_DEBUG_LAR(p, f, a...) IWL_DEBUG(p, IWL_DL_LAR, f, ## a)
#define IWL_DEBUG_FW_INFO(p, f, a...) \
IWL_DEBUG(p, IWL_DL_INFO | IWL_DL_FW, f, ## a)

#endif
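Editorial aside (not part of the patch): IWL_DEBUG_FW_INFO tags a message with both IWL_DL_INFO and IWL_DL_FW, so it is emitted if either category is enabled in the runtime debug mask. The stand-alone C sketch below shows that kind of multi-category filtering in isolation; the flag values and the dbg_printf() helper are invented for the example and are not the driver's actual debug plumbing.

/* sketch_debug_mask.c - illustrative only */
#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative debug categories, not the real IWL_DL_* values. */
#define DL_INFO 0x00000001u
#define DL_FW   0x00010000u

/* Runtime mask chosen by the user, e.g. via a module parameter. */
static uint32_t debug_level = DL_FW;

/* Print only if at least one of the message's categories is enabled. */
static void dbg_printf(uint32_t categories, const char *fmt, ...)
{
	va_list ap;

	if (!(debug_level & categories))
		return;

	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

#define DBG_FW_INFO(fmt, ...) dbg_printf(DL_INFO | DL_FW, fmt, ##__VA_ARGS__)

int main(void)
{
	/* Shown because DL_FW is enabled, even though DL_INFO is not. */
	DBG_FW_INFO("attempting to load firmware '%s'\n", "iwlwifi-QuZ-a0-hr-b0-47.ucode");
	return 0;
}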
@@ -252,8 +252,8 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s%s.ucode",
cfg->fw_name_pre, tag);

IWL_DEBUG_INFO(drv, "attempting to load firmware '%s'\n",
drv->firmware_name);
IWL_DEBUG_FW_INFO(drv, "attempting to load firmware '%s'\n",
drv->firmware_name);

return request_firmware_nowait(THIS_MODULE, 1, drv->firmware_name,
drv->trans->dev,

@@ -1318,8 +1318,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
if (!ucode_raw)
goto try_again;

IWL_DEBUG_INFO(drv, "Loaded firmware file '%s' (%zd bytes).\n",
drv->firmware_name, ucode_raw->size);
IWL_DEBUG_FW_INFO(drv, "Loaded firmware file '%s' (%zd bytes).\n",
drv->firmware_name, ucode_raw->size);

/* Make sure that we got at least the API version number */
if (ucode_raw->size < 4) {
@@ -480,6 +480,7 @@ void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_tof_range_rsp_ntfy_v5 *fw_resp_v5 = (void *)pkt->data;
struct iwl_tof_range_rsp_ntfy_v6 *fw_resp_v6 = (void *)pkt->data;
struct iwl_tof_range_rsp_ntfy *fw_resp = (void *)pkt->data;
int i;
bool new_api = fw_has_api(&mvm->fw->ucode_capa,

@@ -519,7 +520,12 @@ void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
int peer_idx;

if (new_api) {
fw_ap = &fw_resp->ap[i];
if (fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_FTM_RTT_ACCURACY))
fw_ap = &fw_resp->ap[i];
else
fw_ap = (void *)&fw_resp_v6->ap[i];

result.final = fw_resp->ap[i].last_burst;
} else {
/* the first part is the same for old and new APIs */

@@ -588,6 +594,11 @@ void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
mvm->ftm_initiator.req,
&result, GFP_KERNEL);

if (fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_FTM_RTT_ACCURACY))
IWL_DEBUG_INFO(mvm, "RTT confidence: %hhu\n",
fw_ap->rttConfidence);

iwl_mvm_debug_range_resp(mvm, i, &result);
}
@@ -116,8 +116,9 @@ static u8 rs_fw_sgi_cw_support(struct ieee80211_sta *sta)
return supp;
}

static u16 rs_fw_set_config_flags(struct iwl_mvm *mvm,
struct ieee80211_sta *sta)
static u16 rs_fw_get_config_flags(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
struct ieee80211_supported_band *sband)
{
struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;

@@ -147,6 +148,12 @@ static u16 rs_fw_set_config_flags(struct iwl_mvm *mvm,
(vht_ena && (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC))))
flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK;

/* consider our LDPC support in case of HE */
if (sband->iftype_data && sband->iftype_data->he_cap.has_he &&
!(sband->iftype_data->he_cap.he_cap_elem.phy_cap_info[1] &
IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD))
flags &= ~IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK;

if (he_cap && he_cap->has_he &&
(he_cap->he_cap_elem.phy_cap_info[3] &
IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK))

@@ -383,13 +390,13 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw;
u32 cmd_id = iwl_cmd_id(TLC_MNG_CONFIG_CMD, DATA_PATH_GROUP, 0);
struct ieee80211_supported_band *sband;
struct ieee80211_supported_band *sband = hw->wiphy->bands[band];
u16 max_amsdu_len = rs_fw_get_max_amsdu_len(sta);
struct iwl_tlc_config_cmd cfg_cmd = {
.sta_id = mvmsta->sta_id,
.max_ch_width = update ?
rs_fw_bw_from_sta_bw(sta) : RATE_MCS_CHAN_WIDTH_20,
.flags = cpu_to_le16(rs_fw_set_config_flags(mvm, sta)),
.flags = cpu_to_le16(rs_fw_get_config_flags(mvm, sta, sband)),
.chains = rs_fw_set_active_chains(iwl_mvm_get_valid_tx_ant(mvm)),
.sgi_ch_width_supp = rs_fw_sgi_cw_support(sta),
.max_mpdu_len = cpu_to_le16(max_amsdu_len),

@@ -402,7 +409,6 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
#ifdef CONFIG_IWLWIFI_DEBUGFS
iwl_mvm_reset_frame_stats(mvm);
#endif
sband = hw->wiphy->bands[band];
rs_fw_set_supp_rates(sta, sband, &cfg_cmd);

/*
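Editorial aside (not part of the patch): the HE rate-scaling change above starts from the peer's HT/VHT LDPC support and then clears the LDPC flag if our own HE PHY capabilities do not advertise LDPC coding in payload. The stand-alone C sketch below reproduces just that flag decision; the bit values and helper are invented for the example and are not the real IEEE80211_HE_PHY_CAP1_* or IWL_TLC_MNG_* definitions.

/* sketch_he_ldpc_flag.c - illustrative only */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TLC_FLAG_LDPC 0x01               /* stand-in for IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK */
#define HE_PHY_CAP1_LDPC_IN_PAYLOAD 0x20 /* placeholder bit, not the real kernel value */

/* Decide the LDPC config flag the way the patched rs_fw_get_config_flags() does:
 * start from the peer's HT/VHT LDPC support, then drop it when HE is in use and
 * our own HE PHY capability byte lacks LDPC coding in payload. */
static uint16_t ldpc_flag(bool peer_ldpc, bool has_he, uint8_t own_he_phy_cap1)
{
	uint16_t flags = 0;

	if (peer_ldpc)
		flags |= TLC_FLAG_LDPC;

	if (has_he && !(own_he_phy_cap1 & HE_PHY_CAP1_LDPC_IN_PAYLOAD))
		flags &= ~TLC_FLAG_LDPC;

	return flags;
}

int main(void)
{
	printf("peer LDPC, no own HE LDPC -> flags=0x%x\n", ldpc_flag(true, true, 0x00));
	printf("peer LDPC, own HE LDPC    -> flags=0x%x\n", ldpc_flag(true, true, 0x20));
	return 0;
}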
@@ -746,7 +746,8 @@ static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
u8 sta_id, u8 tid, unsigned int timeout)
{
int queue, size = IWL_DEFAULT_QUEUE_SIZE;
int queue, size = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
mvm->trans->cfg->min_256_ba_txq_size);

if (tid == IWL_MAX_TID_COUNT) {
tid = IWL_MGMT_TID;

@@ -2109,12 +2110,14 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)

if (!iwl_mvm_has_new_tx_api(mvm)) {
if (vif->type == NL80211_IFTYPE_AP ||
vif->type == NL80211_IFTYPE_ADHOC)
vif->type == NL80211_IFTYPE_ADHOC) {
queue = mvm->probe_queue;
else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
} else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
queue = mvm->p2p_dev_queue;
else if (WARN(1, "Missing required TXQ for adding bcast STA\n"))
} else {
WARN(1, "Missing required TXQ for adding bcast STA\n");
return -EINVAL;
}

bsta->tfd_queue_msk |= BIT(queue);
@@ -66,7 +66,8 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
void *iml_img;
u32 control_flags = 0;
int ret;
int cmdq_size = max_t(u32, TFD_CMD_SLOTS, trans->cfg->min_txq_size);
int cmdq_size = max_t(u32, IWL_CMD_QUEUE_SIZE,
trans->cfg->min_txq_size);

/* Allocate prph scratch */
prph_scratch = dma_alloc_coherent(trans->dev, sizeof(*prph_scratch),
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
* Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as

@@ -20,7 +20,7 @@
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
* Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

@@ -210,7 +210,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
ctxt_info->hcmd_cfg.cmd_queue_addr =
cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr);
ctxt_info->hcmd_cfg.cmd_queue_size =
TFD_QUEUE_CB_SIZE(TFD_CMD_SLOTS);
TFD_QUEUE_CB_SIZE(IWL_CMD_QUEUE_SIZE);

/* allocate ucode sections in dram and set addresses */
ret = iwl_pcie_init_fw_sec(trans, fw, &ctxt_info->dram);
@@ -928,11 +928,6 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x34F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x4070, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x40C0, 0x0000, iwl22560_2ax_cfg_su_cdb)},
{IWL_PCI_DEVICE(0x40C0, 0x0010, iwl22560_2ax_cfg_su_cdb)},
{IWL_PCI_DEVICE(0x40c0, 0x0090, iwl22560_2ax_cfg_su_cdb)},
{IWL_PCI_DEVICE(0x40C0, 0x0310, iwl22560_2ax_cfg_su_cdb)},
{IWL_PCI_DEVICE(0x40C0, 0x0A10, iwl22560_2ax_cfg_su_cdb)},
{IWL_PCI_DEVICE(0x43F0, 0x0040, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x43F0, 0x0070, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x43F0, 0x0074, iwl_ax101_cfg_qu_hr)},
@@ -290,10 +290,6 @@ struct iwl_cmd_meta {
u32 tbs;
};


#define TFD_TX_CMD_SLOTS 256
#define TFD_CMD_SLOTS 32

/*
* The FH will write back to the first TB only, so we need to copy some data
* into the buffer regardless of whether it should be mapped or not.
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
* Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as

@@ -20,7 +20,7 @@
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
* Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

@@ -234,7 +234,8 @@ void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int queue_size = max_t(u32, TFD_CMD_SLOTS, trans->cfg->min_txq_size);
int queue_size = max_t(u32, IWL_CMD_QUEUE_SIZE,
trans->cfg->min_txq_size);

/* TODO: most of the logic can be removed in A0 - but not in Z0 */
spin_lock(&trans_pcie->irq_lock);
@@ -896,6 +896,8 @@ void iwl_pcie_apply_destination(struct iwl_trans *trans)
if (!trans->num_blocks)
return;

IWL_DEBUG_FW(trans,
"WRT: applying DRAM buffer[0] destination\n");
iwl_write_umac_prph(trans, MON_BUFF_BASE_ADDR_VER2,
trans->fw_mon[0].physical >>
MON_BUFF_SHIFT_VER2);
@@ -996,10 +996,10 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
bool cmd_queue = (txq_id == trans_pcie->cmd_queue);

if (cmd_queue)
slots_num = max_t(u32, TFD_CMD_SLOTS,
slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
trans->cfg->min_txq_size);
else
slots_num = max_t(u32, TFD_TX_CMD_SLOTS,
slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
trans->cfg->min_256_ba_txq_size);
trans_pcie->txq[txq_id] = &trans_pcie->txq_memory[txq_id];
ret = iwl_pcie_txq_alloc(trans, trans_pcie->txq[txq_id],

@@ -1050,10 +1050,10 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
bool cmd_queue = (txq_id == trans_pcie->cmd_queue);

if (cmd_queue)
slots_num = max_t(u32, TFD_CMD_SLOTS,
slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
trans->cfg->min_txq_size);
else
slots_num = max_t(u32, TFD_TX_CMD_SLOTS,
slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
trans->cfg->min_256_ba_txq_size);
ret = iwl_pcie_txq_init(trans, trans_pcie->txq[txq_id],
slots_num, cmd_queue);