Merge tag 'misc-habanalabs-next-2022-11-23' of https://git.kernel.org/pub/scm/linux/kernel/git/ogabbay/linux into char-misc-next

Oded writes:

This tag contains habanalabs driver changes for v6.2:

- New feature of graceful hard-reset. Instead of immediately killing the
  user-process when a command submission times out, we wait a bit and give
  the user-process notification and let it try to close things gracefully,
  with the ability to retrieve debug information.
- Enhance the EventFD mechanism. Add new events such as access to illegal
  address (RAZWI), page fault, device unavailable. In addition, change the
  event workqueue to be handled in a single-threaded workqueue.
- Allow the control device to work during reset of the ASIC, to enable
  monitoring applications to continue getting the data.
- Add handling for Gaudi2 with PCI revision 2.
- Reduce severity of prints due to power/thermal events.
- Change how we use the h/w to perform memory scrubbing in Gaudi2.
- Multiple bug fixes, refactors and renames.

-----BEGIN PGP SIGNATURE-----

iQEzBAABCgAdFiEE7TEboABC71LctBLFZR1NuKta54AFAmN+QSEACgkQZR1NuKta
54Ducgf+PsD85BWXlqWkTa2S8tn7h+OCETapAEY+JMRu1UB15ccLGZlH1O/L2try
NBjUEcbzvS1KPYmNMubKXOIlacTJrukaoqvtMfLSe1+Y/zvfTpF1+LGLp39wSRo8
R36A1VEQxalSZoFhQMERVoWBfjiVaW3ENqe3H9Vb/ab9QdMUzP4P4uaLsECtsLSy
ft31ZcN+jPjhjSYC7xZjzd3KXvVqlQ/5TsXdX6nsxphrOUiKxT55Gsypkx5O4vt1
Q4aw+v3Z0NgknDF90n7O90y/wgE3OqKHiKl+9l7lS/WkLhhaWknJ9zJlfLI8uiEH
UjMku/EpH6SSN5hrCDQvtFaXJSgiWQ==
=cHXD
-----END PGP SIGNATURE-----

* tag 'misc-habanalabs-next-2022-11-23' of https://git.kernel.org/pub/scm/linux/kernel/git/ogabbay/linux: (63 commits)
  habanalabs: fix VA range calculation
  habanalabs: fail driver load if EEPROM errors detected
  habanalabs: make print of engines idle mask more readable
  habanalabs: clear non-released encapsulated signals
  habanalabs: don't put context in hl_encaps_handle_do_release_sob()
  habanalabs: print context refcount value if hard reset fails
  habanalabs: add RMWREG32_SHIFTED to set a val within a mask
  habanalabs: fix rc when new CPUCP opcodes are not supported
  habanalabs/gaudi2: added memset for the cq_size register
  habanalabs: added return value check for hl_fw_dynamic_send_clear_cmd()
  habanalabs: increase the size of busy engines mask
  habanalabs/gaudi2: change memory scrub mechanism
  habanalabs: extend process wait timeout in device fine
  habanalabs: check schedule_hard_reset correctly
  habanalabs: reset device if still in use when released
  habanalabs/gaudi2: return to reset upon SM SEI BRESP error
  habanalabs/gaudi2: don't enable entries in the MSIX_GW table
  habanalabs/gaudi2: remove redundant firmware version check
  habanalabs/gaudi: fix print for firmware-alive event
  habanalabs: fix print for out-of-sync and pkt-failure events
  ...
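With the enhanced EventFD mechanism described above, a monitoring process can simply sleep on an eventfd and wake only when the driver reports something (CS timeout, RAZWI, page fault, upcoming device reset). Below is a minimal, hypothetical userspace sketch of that wait loop in C; the poll()/read() calls are standard Linux eventfd usage, while registering the fd with the driver and querying which HL_NOTIFIER_EVENT_* bits fired are driver-specific steps that are deliberately left as comments.

/*
 * Hypothetical sketch: wait on an eventfd the habanalabs driver was asked to
 * signal. How 'efd' gets registered with the driver and how the pending
 * HL_NOTIFIER_EVENT_* mask is queried afterwards are omitted on purpose.
 */
#include <poll.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static int wait_for_device_event(int efd, int timeout_ms)
{
	struct pollfd pfd = { .fd = efd, .events = POLLIN };
	uint64_t count;
	int rc;

	rc = poll(&pfd, 1, timeout_ms);
	if (rc <= 0)
		return rc;				/* timeout or error */

	/* an eventfd read returns the accumulated notification count */
	if (read(efd, &count, sizeof(count)) != sizeof(count))
		return -1;

	printf("device sent %llu notification(s), query the event mask now\n",
	       (unsigned long long)count);
	return 1;
}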
This commit is contained in: ae27e8869f
@@ -91,6 +91,13 @@ Description:    Enables the root user to set the device to specific state.
                 Valid values are "disable", "enable", "suspend", "resume".
                 User can read this property to see the valid values
 
+What:           /sys/kernel/debug/habanalabs/hl<n>/device_release_watchdog_timeout
+Date:           Oct 2022
+KernelVersion:  6.2
+Contact:        ttayar@habana.ai
+Description:    The watchdog timeout value in seconds for a device release upon
+                certain error cases, after which the device is reset.
+
 What:           /sys/kernel/debug/habanalabs/hl<n>/dma_size
 Date:           Apr 2021
 KernelVersion:  5.13
@ -742,13 +742,11 @@ static void cs_do_release(struct kref *ref)
|
||||
*/
|
||||
if (hl_cs_cmpl->encaps_signals)
|
||||
kref_put(&hl_cs_cmpl->encaps_sig_hdl->refcount,
|
||||
hl_encaps_handle_do_release);
|
||||
hl_encaps_release_handle_and_put_ctx);
|
||||
}
|
||||
|
||||
if ((cs->type == CS_TYPE_WAIT || cs->type == CS_TYPE_COLLECTIVE_WAIT)
|
||||
&& cs->encaps_signals)
|
||||
kref_put(&cs->encaps_sig_hdl->refcount,
|
||||
hl_encaps_handle_do_release);
|
||||
if ((cs->type == CS_TYPE_WAIT || cs->type == CS_TYPE_COLLECTIVE_WAIT) && cs->encaps_signals)
|
||||
kref_put(&cs->encaps_sig_hdl->refcount, hl_encaps_release_handle_and_put_ctx);
|
||||
|
||||
out:
|
||||
/* Must be called before hl_ctx_put because inside we use ctx to get
|
||||
@ -798,7 +796,7 @@ static void cs_do_release(struct kref *ref)
|
||||
static void cs_timedout(struct work_struct *work)
|
||||
{
|
||||
struct hl_device *hdev;
|
||||
u64 event_mask;
|
||||
u64 event_mask = 0x0;
|
||||
int rc;
|
||||
struct hl_cs *cs = container_of(work, struct hl_cs,
|
||||
work_tdr.work);
|
||||
@ -830,11 +828,7 @@ static void cs_timedout(struct work_struct *work)
|
||||
if (rc) {
|
||||
hdev->captured_err_info.cs_timeout.timestamp = ktime_get();
|
||||
hdev->captured_err_info.cs_timeout.seq = cs->sequence;
|
||||
|
||||
event_mask = device_reset ? (HL_NOTIFIER_EVENT_CS_TIMEOUT |
|
||||
HL_NOTIFIER_EVENT_DEVICE_RESET) : HL_NOTIFIER_EVENT_CS_TIMEOUT;
|
||||
|
||||
hl_notifier_event_send_all(hdev, event_mask);
|
||||
event_mask |= HL_NOTIFIER_EVENT_CS_TIMEOUT;
|
||||
}
|
||||
|
||||
switch (cs->type) {
|
||||
@ -869,8 +863,12 @@ static void cs_timedout(struct work_struct *work)
|
||||
|
||||
cs_put(cs);
|
||||
|
||||
if (device_reset)
|
||||
hl_device_reset(hdev, HL_DRV_RESET_TDR);
|
||||
if (device_reset) {
|
||||
event_mask |= HL_NOTIFIER_EVENT_DEVICE_RESET;
|
||||
hl_device_cond_reset(hdev, HL_DRV_RESET_TDR, event_mask);
|
||||
} else if (event_mask) {
|
||||
hl_notifier_event_send_all(hdev, event_mask);
|
||||
}
|
||||
}
|
||||
|
||||
static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
|
||||
@ -1011,6 +1009,34 @@ static void cs_rollback(struct hl_device *hdev, struct hl_cs *cs)
|
||||
hl_complete_job(hdev, job);
|
||||
}
|
||||
|
||||
+/*
+ * release_reserved_encaps_signals() - release reserved encapsulated signals.
+ * @hdev: pointer to habanalabs device structure
+ *
+ * Release reserved encapsulated signals which weren't un-reserved, or for which a CS with
+ * encapsulated signals wasn't submitted and thus weren't released as part of CS roll-back.
+ * For these signals we need to also put the refcount of the H/W SOB which was taken at the
+ * reservation.
+ */
+static void release_reserved_encaps_signals(struct hl_device *hdev)
+{
+	struct hl_ctx *ctx = hl_get_compute_ctx(hdev);
+	struct hl_cs_encaps_sig_handle *handle;
+	struct hl_encaps_signals_mgr *mgr;
+	u32 id;
+
+	if (!ctx)
+		return;
+
+	mgr = &ctx->sig_mgr;
+
+	idr_for_each_entry(&mgr->handles, handle, id)
+		if (handle->cs_seq == ULLONG_MAX)
+			kref_put(&handle->refcount, hl_encaps_release_handle_and_put_sob_ctx);
+
+	hl_ctx_put(ctx);
+}
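As a side note on the sentinel check above (handle->cs_seq == ULLONG_MAX marks a reservation that never had a CS submitted against it), here is a small stand-alone C sketch of the same sweep pattern; the array and names below are invented for illustration and merely stand in for the IDR and the kref handling.

#include <stdint.h>
#include <stdio.h>

#define SEQ_UNUSED UINT64_MAX	/* plays the role of cs_seq == ULLONG_MAX */

struct demo_handle {
	uint64_t cs_seq;
	int in_use;
};

/* Release every reservation that never got a CS submitted against it. */
static int sweep_reserved(struct demo_handle *tbl, int n)
{
	int released = 0;

	for (int i = 0; i < n; i++) {
		if (tbl[i].in_use && tbl[i].cs_seq == SEQ_UNUSED) {
			tbl[i].in_use = 0;	/* the driver does kref_put() here */
			released++;
		}
	}
	return released;
}

int main(void)
{
	struct demo_handle tbl[3] = {
		{ .cs_seq = 7,          .in_use = 1 },	/* consumed by a CS */
		{ .cs_seq = SEQ_UNUSED, .in_use = 1 },	/* reserved, never used */
		{ .cs_seq = SEQ_UNUSED, .in_use = 0 },	/* already released */
	};

	printf("released %d stale reservation(s)\n", sweep_reserved(tbl, 3));
	return 0;
}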
|
||||
|
||||
void hl_cs_rollback_all(struct hl_device *hdev, bool skip_wq_flush)
|
||||
{
|
||||
int i;
|
||||
@ -1039,6 +1065,8 @@ void hl_cs_rollback_all(struct hl_device *hdev, bool skip_wq_flush)
|
||||
}
|
||||
|
||||
force_complete_multi_cs(hdev);
|
||||
|
||||
release_reserved_encaps_signals(hdev);
|
||||
}
|
||||
|
||||
static void
|
||||
@ -2001,6 +2029,8 @@ static int cs_ioctl_reserve_signals(struct hl_fpriv *hpriv,
|
||||
*/
|
||||
handle->pre_sob_val = prop->next_sob_val - handle->count;
|
||||
|
||||
handle->cs_seq = ULLONG_MAX;
|
||||
|
||||
*signals_count = prop->next_sob_val;
|
||||
hdev->asic_funcs->hw_queues_unlock(hdev);
|
||||
|
||||
@ -2350,10 +2380,8 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
|
||||
/* We finished with the CS in this function, so put the ref */
|
||||
cs_put(cs);
|
||||
free_cs_chunk_array:
|
||||
if (!wait_cs_submitted && cs_encaps_signals && handle_found &&
|
||||
is_wait_cs)
|
||||
kref_put(&encaps_sig_hdl->refcount,
|
||||
hl_encaps_handle_do_release);
|
||||
if (!wait_cs_submitted && cs_encaps_signals && handle_found && is_wait_cs)
|
||||
kref_put(&encaps_sig_hdl->refcount, hl_encaps_release_handle_and_put_ctx);
|
||||
kfree(cs_chunk_array);
|
||||
out:
|
||||
return rc;
|
||||
|
@ -9,38 +9,46 @@
|
||||
|
||||
#include <linux/slab.h>
|
||||
|
||||
void hl_encaps_handle_do_release(struct kref *ref)
|
||||
static void encaps_handle_do_release(struct hl_cs_encaps_sig_handle *handle, bool put_hw_sob,
|
||||
bool put_ctx)
|
||||
{
|
||||
struct hl_cs_encaps_sig_handle *handle =
|
||||
container_of(ref, struct hl_cs_encaps_sig_handle, refcount);
|
||||
struct hl_encaps_signals_mgr *mgr = &handle->ctx->sig_mgr;
|
||||
|
||||
if (put_hw_sob)
|
||||
hw_sob_put(handle->hw_sob);
|
||||
|
||||
spin_lock(&mgr->lock);
|
||||
idr_remove(&mgr->handles, handle->id);
|
||||
spin_unlock(&mgr->lock);
|
||||
|
||||
hl_ctx_put(handle->ctx);
|
||||
if (put_ctx)
|
||||
hl_ctx_put(handle->ctx);
|
||||
|
||||
kfree(handle);
|
||||
}
|
||||
|
||||
static void hl_encaps_handle_do_release_sob(struct kref *ref)
|
||||
void hl_encaps_release_handle_and_put_ctx(struct kref *ref)
|
||||
{
|
||||
struct hl_cs_encaps_sig_handle *handle =
|
||||
container_of(ref, struct hl_cs_encaps_sig_handle, refcount);
|
||||
struct hl_encaps_signals_mgr *mgr = &handle->ctx->sig_mgr;
|
||||
container_of(ref, struct hl_cs_encaps_sig_handle, refcount);
|
||||
|
||||
/* if we're here, then there was a signals reservation but cs with
|
||||
* encaps signals wasn't submitted, so need to put refcount
|
||||
* to hw_sob taken at the reservation.
|
||||
*/
|
||||
hw_sob_put(handle->hw_sob);
|
||||
encaps_handle_do_release(handle, false, true);
|
||||
}
|
||||
|
||||
spin_lock(&mgr->lock);
|
||||
idr_remove(&mgr->handles, handle->id);
|
||||
spin_unlock(&mgr->lock);
|
||||
static void hl_encaps_release_handle_and_put_sob(struct kref *ref)
|
||||
{
|
||||
struct hl_cs_encaps_sig_handle *handle =
|
||||
container_of(ref, struct hl_cs_encaps_sig_handle, refcount);
|
||||
|
||||
hl_ctx_put(handle->ctx);
|
||||
kfree(handle);
|
||||
encaps_handle_do_release(handle, true, false);
|
||||
}
|
||||
|
||||
void hl_encaps_release_handle_and_put_sob_ctx(struct kref *ref)
|
||||
{
|
||||
struct hl_cs_encaps_sig_handle *handle =
|
||||
container_of(ref, struct hl_cs_encaps_sig_handle, refcount);
|
||||
|
||||
encaps_handle_do_release(handle, true, true);
|
||||
}
|
||||
|
||||
static void hl_encaps_sig_mgr_init(struct hl_encaps_signals_mgr *mgr)
|
||||
@ -49,8 +57,7 @@ static void hl_encaps_sig_mgr_init(struct hl_encaps_signals_mgr *mgr)
|
||||
idr_init(&mgr->handles);
|
||||
}
|
||||
|
||||
static void hl_encaps_sig_mgr_fini(struct hl_device *hdev,
|
||||
struct hl_encaps_signals_mgr *mgr)
|
||||
static void hl_encaps_sig_mgr_fini(struct hl_device *hdev, struct hl_encaps_signals_mgr *mgr)
|
||||
{
|
||||
struct hl_cs_encaps_sig_handle *handle;
|
||||
struct idr *idp;
|
||||
@ -58,11 +65,14 @@ static void hl_encaps_sig_mgr_fini(struct hl_device *hdev,
|
||||
|
||||
idp = &mgr->handles;
|
||||
|
||||
/* The IDR is expected to be empty at this stage, because any left signal should have been
|
||||
* released as part of CS roll-back.
|
||||
*/
|
||||
if (!idr_is_empty(idp)) {
|
||||
dev_warn(hdev->dev, "device released while some encaps signals handles are still allocated\n");
|
||||
dev_warn(hdev->dev,
|
||||
"device released while some encaps signals handles are still allocated\n");
|
||||
idr_for_each_entry(idp, handle, id)
|
||||
kref_put(&handle->refcount,
|
||||
hl_encaps_handle_do_release_sob);
|
||||
kref_put(&handle->refcount, hl_encaps_release_handle_and_put_sob);
|
||||
}
|
||||
|
||||
idr_destroy(&mgr->handles);
|
||||
|
@ -1769,6 +1769,11 @@ void hl_debugfs_add_device(struct hl_device *hdev)
|
||||
dev_entry,
|
||||
&hl_timeout_locked_fops);
|
||||
|
||||
debugfs_create_u32("device_release_watchdog_timeout",
|
||||
0644,
|
||||
dev_entry->root,
|
||||
&hdev->device_release_watchdog_timeout_sec);
|
||||
|
||||
for (i = 0, entry = dev_entry->entry_arr ; i < count ; i++, entry++) {
|
||||
debugfs_create_file(hl_debugfs_list[i].name,
|
||||
0444,
|
||||
|
@ -12,10 +12,13 @@
|
||||
|
||||
#include <linux/pci.h>
|
||||
#include <linux/hwmon.h>
|
||||
#include <linux/vmalloc.h>
|
||||
|
||||
#include <trace/events/habanalabs.h>
|
||||
|
||||
#define HL_RESET_DELAY_USEC 10000 /* 10ms */
|
||||
#define HL_RESET_DELAY_USEC 10000 /* 10ms */
|
||||
|
||||
#define HL_DEVICE_RELEASE_WATCHDOG_TIMEOUT_SEC 5
|
||||
|
||||
enum dma_alloc_type {
|
||||
DMA_ALLOC_COHERENT,
|
||||
@ -31,6 +34,7 @@ enum dma_alloc_type {
|
||||
* @hdev: pointer to habanalabs device structure.
|
||||
* @addr: the address the caller wants to access.
|
||||
* @region: the PCI region.
|
||||
* @new_bar_region_base: the new BAR region base address.
|
||||
*
|
||||
* @return: the old BAR base address on success, U64_MAX for failure.
|
||||
* The caller should set it back to the old address after use.
|
||||
@ -40,7 +44,8 @@ enum dma_alloc_type {
|
||||
* This function can be called also if the bar doesn't need to be set,
|
||||
* in that case it just won't change the base.
|
||||
*/
|
||||
static u64 hl_set_dram_bar(struct hl_device *hdev, u64 addr, struct pci_mem_region *region)
|
||||
static u64 hl_set_dram_bar(struct hl_device *hdev, u64 addr, struct pci_mem_region *region,
|
||||
u64 *new_bar_region_base)
|
||||
{
|
||||
struct asic_fixed_properties *prop = &hdev->asic_prop;
|
||||
u64 bar_base_addr, old_base;
|
||||
@ -54,27 +59,28 @@ static u64 hl_set_dram_bar(struct hl_device *hdev, u64 addr, struct pci_mem_regi
|
||||
old_base = hdev->asic_funcs->set_dram_bar_base(hdev, bar_base_addr);
|
||||
|
||||
/* in case of success we need to update the new BAR base */
|
||||
if (old_base != U64_MAX)
|
||||
region->region_base = bar_base_addr;
|
||||
if ((old_base != U64_MAX) && new_bar_region_base)
|
||||
*new_bar_region_base = bar_base_addr;
|
||||
|
||||
return old_base;
|
||||
}
|
||||
|
||||
static int hl_access_sram_dram_region(struct hl_device *hdev, u64 addr, u64 *val,
|
||||
enum debugfs_access_type acc_type, enum pci_region region_type)
|
||||
int hl_access_sram_dram_region(struct hl_device *hdev, u64 addr, u64 *val,
|
||||
enum debugfs_access_type acc_type, enum pci_region region_type, bool set_dram_bar)
|
||||
{
|
||||
struct pci_mem_region *region = &hdev->pci_mem_region[region_type];
|
||||
u64 old_base = 0, rc, bar_region_base = region->region_base;
|
||||
void __iomem *acc_addr;
|
||||
u64 old_base = 0, rc;
|
||||
|
||||
if (region_type == PCI_REGION_DRAM) {
|
||||
old_base = hl_set_dram_bar(hdev, addr, region);
|
||||
if (set_dram_bar) {
|
||||
old_base = hl_set_dram_bar(hdev, addr, region, &bar_region_base);
|
||||
if (old_base == U64_MAX)
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
acc_addr = hdev->pcie_bar[region->bar_id] + addr - region->region_base +
|
||||
region->offset_in_bar;
|
||||
acc_addr = hdev->pcie_bar[region->bar_id] + region->offset_in_bar +
|
||||
(addr - bar_region_base);
|
||||
|
||||
switch (acc_type) {
|
||||
case DEBUGFS_READ8:
|
||||
*val = readb(acc_addr);
|
||||
@ -96,8 +102,8 @@ static int hl_access_sram_dram_region(struct hl_device *hdev, u64 addr, u64 *val
|
||||
break;
|
||||
}
|
||||
|
||||
if (region_type == PCI_REGION_DRAM) {
|
||||
rc = hl_set_dram_bar(hdev, old_base, region);
|
||||
if (set_dram_bar) {
|
||||
rc = hl_set_dram_bar(hdev, old_base, region, NULL);
|
||||
if (rc == U64_MAX)
|
||||
return -EIO;
|
||||
}
|
||||
@ -134,6 +140,9 @@ static void hl_asic_dma_free_common(struct hl_device *hdev, size_t size, void *c
|
||||
dma_addr_t dma_handle, enum dma_alloc_type alloc_type,
|
||||
const char *caller)
|
||||
{
|
||||
/* this is needed to avoid warning on using freed pointer */
|
||||
u64 store_cpu_addr = (u64) (uintptr_t) cpu_addr;
|
||||
|
||||
switch (alloc_type) {
|
||||
case DMA_ALLOC_COHERENT:
|
||||
hdev->asic_funcs->asic_dma_free_coherent(hdev, size, cpu_addr, dma_handle);
|
||||
@ -146,7 +155,7 @@ static void hl_asic_dma_free_common(struct hl_device *hdev, size_t size, void *c
|
||||
break;
|
||||
}
|
||||
|
||||
trace_habanalabs_dma_free(hdev->dev, (u64) (uintptr_t) cpu_addr, dma_handle, size, caller);
|
||||
trace_habanalabs_dma_free(hdev->dev, store_cpu_addr, dma_handle, size, caller);
|
||||
}
|
||||
|
||||
void *hl_asic_dma_alloc_coherent_caller(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle,
|
||||
@ -279,7 +288,7 @@ int hl_access_dev_mem(struct hl_device *hdev, enum pci_region region_type,
|
||||
case PCI_REGION_SRAM:
|
||||
case PCI_REGION_DRAM:
|
||||
return hl_access_sram_dram_region(hdev, addr, val, acc_type,
|
||||
region_type);
|
||||
region_type, (region_type == PCI_REGION_DRAM));
|
||||
default:
|
||||
return -EFAULT;
|
||||
}
|
||||
@ -355,10 +364,49 @@ bool hl_device_operational(struct hl_device *hdev,
|
||||
}
|
||||
}
|
||||
|
||||
bool hl_ctrl_device_operational(struct hl_device *hdev,
|
||||
enum hl_device_status *status)
|
||||
{
|
||||
enum hl_device_status current_status;
|
||||
|
||||
current_status = hl_device_status(hdev);
|
||||
if (status)
|
||||
*status = current_status;
|
||||
|
||||
switch (current_status) {
|
||||
case HL_DEVICE_STATUS_MALFUNCTION:
|
||||
return false;
|
||||
case HL_DEVICE_STATUS_IN_RESET:
|
||||
case HL_DEVICE_STATUS_IN_RESET_AFTER_DEVICE_RELEASE:
|
||||
case HL_DEVICE_STATUS_NEEDS_RESET:
|
||||
case HL_DEVICE_STATUS_OPERATIONAL:
|
||||
case HL_DEVICE_STATUS_IN_DEVICE_CREATION:
|
||||
default:
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
+static void print_idle_status_mask(struct hl_device *hdev, const char *message,
+					u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE])
+{
+	u32 pad_width[HL_BUSY_ENGINES_MASK_EXT_SIZE] = {};
+
+	BUILD_BUG_ON(HL_BUSY_ENGINES_MASK_EXT_SIZE != 4);
+
+	pad_width[3] = idle_mask[3] ? 16 : 0;
+	pad_width[2] = idle_mask[2] || pad_width[3] ? 16 : 0;
+	pad_width[1] = idle_mask[1] || pad_width[2] ? 16 : 0;
+	pad_width[0] = idle_mask[0] || pad_width[1] ? 16 : 0;
+
+	dev_err(hdev->dev, "%s (mask %0*llx_%0*llx_%0*llx_%0*llx)\n",
+		message, pad_width[3], idle_mask[3], pad_width[2], idle_mask[2],
+		pad_width[1], idle_mask[1], pad_width[0], idle_mask[0]);
+}
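The pad_width logic above keeps the printed mask short when the upper quadwords are empty: an all-zero high word is printed without padding (a single 0), while anything at or below a populated word keeps the full 16-digit padding so the positions stay readable. A stand-alone C sketch of the same formatting trick, runnable outside the kernel (the helper name is made up for the example):

#include <stdint.h>
#include <stdio.h>

static void print_idle_mask(const uint64_t m[4])
{
	int pad[4];

	/* pad a quadword only if it, or any higher quadword, is populated */
	pad[3] = m[3] ? 16 : 0;
	pad[2] = (m[2] || pad[3]) ? 16 : 0;
	pad[1] = (m[1] || pad[2]) ? 16 : 0;
	pad[0] = (m[0] || pad[1]) ? 16 : 0;

	printf("mask %0*llx_%0*llx_%0*llx_%0*llx\n",
	       pad[3], (unsigned long long)m[3], pad[2], (unsigned long long)m[2],
	       pad[1], (unsigned long long)m[1], pad[0], (unsigned long long)m[0]);
}

int main(void)
{
	uint64_t small[4] = { 0x3, 0, 0, 0 };
	uint64_t wide[4]  = { 0x3, 0x10, 0, 0 };

	print_idle_mask(small);	/* -> mask 0_0_0_0000000000000003 */
	print_idle_mask(wide);	/* -> mask 0_0_0000000000000010_0000000000000003 */
	return 0;
}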
|
||||
|
||||
static void hpriv_release(struct kref *ref)
|
||||
{
|
||||
u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE] = {0};
|
||||
bool device_is_idle = true;
|
||||
bool reset_device, device_is_idle = true;
|
||||
struct hl_fpriv *hpriv;
|
||||
struct hl_device *hdev;
|
||||
|
||||
@ -375,15 +423,19 @@ static void hpriv_release(struct kref *ref)
|
||||
mutex_destroy(&hpriv->ctx_lock);
|
||||
mutex_destroy(&hpriv->restore_phase_mutex);
|
||||
|
||||
if ((!hdev->pldm) && (hdev->pdev) &&
|
||||
(!hdev->asic_funcs->is_device_idle(hdev,
|
||||
idle_mask,
|
||||
HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL))) {
|
||||
dev_err(hdev->dev,
|
||||
"device not idle after user context is closed (0x%llx_%llx)\n",
|
||||
idle_mask[1], idle_mask[0]);
|
||||
/* Device should be reset if reset-upon-device-release is enabled, or if there is a pending
|
||||
* reset that waits for device release.
|
||||
*/
|
||||
reset_device = hdev->reset_upon_device_release || hdev->reset_info.watchdog_active;
|
||||
|
||||
device_is_idle = false;
|
||||
/* Unless device is reset in any case, check idle status and reset if device is not idle */
|
||||
if (!reset_device && hdev->pdev && !hdev->pldm)
|
||||
device_is_idle = hdev->asic_funcs->is_device_idle(hdev, idle_mask,
|
||||
HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL);
|
||||
if (!device_is_idle) {
|
||||
print_idle_status_mask(hdev, "device is not idle after user context is closed",
|
||||
idle_mask);
|
||||
reset_device = true;
|
||||
}
|
||||
|
||||
/* We need to remove the user from the list to make sure the reset process won't
|
||||
@ -399,9 +451,10 @@ static void hpriv_release(struct kref *ref)
|
||||
list_del(&hpriv->dev_node);
|
||||
mutex_unlock(&hdev->fpriv_list_lock);
|
||||
|
||||
if (!device_is_idle || hdev->reset_upon_device_release) {
|
||||
if (reset_device) {
|
||||
hl_device_reset(hdev, HL_DRV_RESET_DEV_RELEASE);
|
||||
} else {
|
||||
/* Scrubbing is handled within hl_device_reset(), so here need to do it directly */
|
||||
int rc = hdev->asic_funcs->scrub_device_mem(hdev);
|
||||
|
||||
if (rc)
|
||||
@ -468,9 +521,10 @@ static int hl_device_release(struct inode *inode, struct file *filp)
|
||||
|
||||
hdev->compute_ctx_in_release = 1;
|
||||
|
||||
if (!hl_hpriv_put(hpriv))
|
||||
dev_notice(hdev->dev,
|
||||
"User process closed FD but device still in use\n");
|
||||
if (!hl_hpriv_put(hpriv)) {
|
||||
dev_notice(hdev->dev, "User process closed FD but device still in use\n");
|
||||
hl_device_reset(hdev, HL_DRV_RESET_HARD);
|
||||
}
|
||||
|
||||
hdev->last_open_session_duration_jif =
|
||||
jiffies - hdev->last_successful_open_jif;
|
||||
@ -658,17 +712,42 @@ static void device_hard_reset_pending(struct work_struct *work)
|
||||
flags = device_reset_work->flags | HL_DRV_RESET_FROM_RESET_THR;
|
||||
|
||||
rc = hl_device_reset(hdev, flags);
|
||||
if ((rc == -EBUSY) && !hdev->device_fini_pending) {
|
||||
dev_info(hdev->dev,
|
||||
"Could not reset device. will try again in %u seconds",
|
||||
HL_PENDING_RESET_PER_SEC);
|
||||
|
||||
queue_delayed_work(device_reset_work->wq,
|
||||
&device_reset_work->reset_work,
|
||||
msecs_to_jiffies(HL_PENDING_RESET_PER_SEC * 1000));
|
||||
if ((rc == -EBUSY) && !hdev->device_fini_pending) {
|
||||
struct hl_ctx *ctx = hl_get_compute_ctx(hdev);
|
||||
|
||||
if (ctx) {
|
||||
/* The read refcount value should subtracted by one, because the read is
|
||||
* protected with hl_get_compute_ctx().
|
||||
*/
|
||||
dev_info(hdev->dev,
|
||||
"Could not reset device (compute_ctx refcount %u). will try again in %u seconds",
|
||||
kref_read(&ctx->refcount) - 1, HL_PENDING_RESET_PER_SEC);
|
||||
hl_ctx_put(ctx);
|
||||
} else {
|
||||
dev_info(hdev->dev, "Could not reset device. will try again in %u seconds",
|
||||
HL_PENDING_RESET_PER_SEC);
|
||||
}
|
||||
|
||||
queue_delayed_work(hdev->reset_wq, &device_reset_work->reset_work,
|
||||
msecs_to_jiffies(HL_PENDING_RESET_PER_SEC * 1000));
|
||||
}
|
||||
}
|
||||
|
||||
static void device_release_watchdog_func(struct work_struct *work)
|
||||
{
|
||||
struct hl_device_reset_work *device_release_watchdog_work =
|
||||
container_of(work, struct hl_device_reset_work, reset_work.work);
|
||||
struct hl_device *hdev = device_release_watchdog_work->hdev;
|
||||
u32 flags;
|
||||
|
||||
dev_dbg(hdev->dev, "Device wasn't released in time. Initiate device reset.\n");
|
||||
|
||||
flags = device_release_watchdog_work->flags | HL_DRV_RESET_FROM_WD_THR;
|
||||
|
||||
hl_device_reset(hdev, flags);
|
||||
}
|
||||
|
||||
/*
|
||||
* device_early_init - do some early initialization for the habanalabs device
|
||||
*
|
||||
@ -699,9 +778,10 @@ static int device_early_init(struct hl_device *hdev)
|
||||
gaudi2_set_asic_funcs(hdev);
|
||||
strscpy(hdev->asic_name, "GAUDI2", sizeof(hdev->asic_name));
|
||||
break;
|
||||
case ASIC_GAUDI2_SEC:
|
||||
case ASIC_GAUDI2B:
|
||||
gaudi2_set_asic_funcs(hdev);
|
||||
strscpy(hdev->asic_name, "GAUDI2 SEC", sizeof(hdev->asic_name));
|
||||
strscpy(hdev->asic_name, "GAUDI2B", sizeof(hdev->asic_name));
|
||||
break;
|
||||
break;
|
||||
default:
|
||||
dev_err(hdev->dev, "Unrecognized ASIC type %d\n",
|
||||
@ -737,7 +817,7 @@ static int device_early_init(struct hl_device *hdev)
|
||||
}
|
||||
}
|
||||
|
||||
hdev->eq_wq = alloc_workqueue("hl-events", WQ_UNBOUND, 0);
|
||||
hdev->eq_wq = create_singlethread_workqueue("hl-events");
|
||||
if (hdev->eq_wq == NULL) {
|
||||
dev_err(hdev->dev, "Failed to allocate EQ workqueue\n");
|
||||
rc = -ENOMEM;
|
||||
@ -760,8 +840,8 @@ static int device_early_init(struct hl_device *hdev)
|
||||
goto free_cs_cmplt_wq;
|
||||
}
|
||||
|
||||
hdev->pf_wq = alloc_workqueue("hl-prefetch", WQ_UNBOUND, 0);
|
||||
if (!hdev->pf_wq) {
|
||||
hdev->prefetch_wq = alloc_workqueue("hl-prefetch", WQ_UNBOUND, 0);
|
||||
if (!hdev->prefetch_wq) {
|
||||
dev_err(hdev->dev, "Failed to allocate MMU prefetch workqueue\n");
|
||||
rc = -ENOMEM;
|
||||
goto free_ts_free_wq;
|
||||
@ -771,7 +851,7 @@ static int device_early_init(struct hl_device *hdev)
|
||||
GFP_KERNEL);
|
||||
if (!hdev->hl_chip_info) {
|
||||
rc = -ENOMEM;
|
||||
goto free_pf_wq;
|
||||
goto free_prefetch_wq;
|
||||
}
|
||||
|
||||
rc = hl_mmu_if_set_funcs(hdev);
|
||||
@ -780,19 +860,21 @@ static int device_early_init(struct hl_device *hdev)
|
||||
|
||||
hl_mem_mgr_init(hdev->dev, &hdev->kernel_mem_mgr);
|
||||
|
||||
hdev->device_reset_work.wq =
|
||||
create_singlethread_workqueue("hl_device_reset");
|
||||
if (!hdev->device_reset_work.wq) {
|
||||
hdev->reset_wq = create_singlethread_workqueue("hl_device_reset");
|
||||
if (!hdev->reset_wq) {
|
||||
rc = -ENOMEM;
|
||||
dev_err(hdev->dev, "Failed to create device reset WQ\n");
|
||||
goto free_cb_mgr;
|
||||
}
|
||||
|
||||
INIT_DELAYED_WORK(&hdev->device_reset_work.reset_work,
|
||||
device_hard_reset_pending);
|
||||
INIT_DELAYED_WORK(&hdev->device_reset_work.reset_work, device_hard_reset_pending);
|
||||
hdev->device_reset_work.hdev = hdev;
|
||||
hdev->device_fini_pending = 0;
|
||||
|
||||
INIT_DELAYED_WORK(&hdev->device_release_watchdog_work.reset_work,
|
||||
device_release_watchdog_func);
|
||||
hdev->device_release_watchdog_work.hdev = hdev;
|
||||
|
||||
mutex_init(&hdev->send_cpu_message_lock);
|
||||
mutex_init(&hdev->debug_lock);
|
||||
INIT_LIST_HEAD(&hdev->cs_mirror_list);
|
||||
@ -810,8 +892,8 @@ static int device_early_init(struct hl_device *hdev)
|
||||
hl_mem_mgr_fini(&hdev->kernel_mem_mgr);
|
||||
free_chip_info:
|
||||
kfree(hdev->hl_chip_info);
|
||||
free_pf_wq:
|
||||
destroy_workqueue(hdev->pf_wq);
|
||||
free_prefetch_wq:
|
||||
destroy_workqueue(hdev->prefetch_wq);
|
||||
free_ts_free_wq:
|
||||
destroy_workqueue(hdev->ts_free_obj_wq);
|
||||
free_cs_cmplt_wq:
|
||||
@ -854,11 +936,11 @@ static void device_early_fini(struct hl_device *hdev)
|
||||
|
||||
kfree(hdev->hl_chip_info);
|
||||
|
||||
destroy_workqueue(hdev->pf_wq);
|
||||
destroy_workqueue(hdev->prefetch_wq);
|
||||
destroy_workqueue(hdev->ts_free_obj_wq);
|
||||
destroy_workqueue(hdev->cs_cmplt_wq);
|
||||
destroy_workqueue(hdev->eq_wq);
|
||||
destroy_workqueue(hdev->device_reset_work.wq);
|
||||
destroy_workqueue(hdev->reset_wq);
|
||||
|
||||
for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
|
||||
destroy_workqueue(hdev->cq_wq[i]);
|
||||
@@ -962,11 +1044,16 @@ static void device_late_fini(struct hl_device *hdev)
 
 int hl_device_utilization(struct hl_device *hdev, u32 *utilization)
 {
-	u64 max_power, curr_power, dc_power, dividend;
+	u64 max_power, curr_power, dc_power, dividend, divisor;
 	int rc;
 
 	max_power = hdev->max_power;
 	dc_power = hdev->asic_prop.dc_power_default;
+	divisor = max_power - dc_power;
+	if (!divisor) {
+		dev_warn(hdev->dev, "device utilization is not supported\n");
+		return -EOPNOTSUPP;
+	}
+
 	rc = hl_fw_cpucp_power_get(hdev, &curr_power);
 
 	if (rc)
@@ -975,7 +1062,7 @@ int hl_device_utilization(struct hl_device *hdev, u32 *utilization)
 	curr_power = clamp(curr_power, dc_power, max_power);
 
 	dividend = (curr_power - dc_power) * 100;
-	*utilization = (u32) div_u64(dividend, (max_power - dc_power));
+	*utilization = (u32) div_u64(dividend, divisor);
 
 	return 0;
 }
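The computation above is just a guarded percentage between the idle (DC) power and the maximum power. A stand-alone C sketch of the same arithmetic with hypothetical numbers:

#include <stdint.h>
#include <stdio.h>

/* Same idea as hl_device_utilization(): utilization is the current power's
 * position between dc (idle) and max, as a percentage, with a guard against
 * max == dc so the division can never be by zero.
 */
static int utilization_pct(uint64_t curr, uint64_t dc, uint64_t max, uint32_t *pct)
{
	uint64_t divisor = max - dc;

	if (!divisor)
		return -1;		/* mirrors the -EOPNOTSUPP case */

	if (curr < dc)
		curr = dc;		/* clamp() lower bound */
	if (curr > max)
		curr = max;		/* clamp() upper bound */

	*pct = (uint32_t)(((curr - dc) * 100) / divisor);
	return 0;
}

int main(void)
{
	uint32_t pct;

	if (!utilization_pct(250, 100, 400, &pct))
		printf("utilization: %u%%\n", pct);	/* (250-100)*100/300 = 50 */
	return 0;
}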
|
||||
@ -1053,7 +1140,7 @@ static void cleanup_resources(struct hl_device *hdev, bool hard_reset, bool fw_r
|
||||
hl_cs_rollback_all(hdev, skip_wq_flush);
|
||||
|
||||
/* flush the MMU prefetch workqueue */
|
||||
flush_workqueue(hdev->pf_wq);
|
||||
flush_workqueue(hdev->prefetch_wq);
|
||||
|
||||
/* Release all pending user interrupts, each pending user interrupt
|
||||
* holds a reference to user context
|
||||
@ -1264,6 +1351,10 @@ static void handle_reset_trigger(struct hl_device *hdev, u32 flags)
|
||||
{
|
||||
u32 cur_reset_trigger = HL_RESET_TRIGGER_DEFAULT;
|
||||
|
||||
/* No consecutive mechanism when user context exists */
|
||||
if (hdev->is_compute_ctx_active)
|
||||
return;
|
||||
|
||||
/*
|
||||
* 'reset cause' is being updated here, because getting here
|
||||
* means that it's the 1st time and the last time we're here
|
||||
@ -1337,8 +1428,8 @@ static void handle_reset_trigger(struct hl_device *hdev, u32 flags)
|
||||
int hl_device_reset(struct hl_device *hdev, u32 flags)
|
||||
{
|
||||
bool hard_reset, from_hard_reset_thread, fw_reset, hard_instead_soft = false,
|
||||
reset_upon_device_release = false, schedule_hard_reset = false,
|
||||
skip_wq_flush, delay_reset;
|
||||
reset_upon_device_release = false, schedule_hard_reset = false, delay_reset,
|
||||
from_dev_release, from_watchdog_thread;
|
||||
u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE] = {0};
|
||||
struct hl_ctx *ctx;
|
||||
int i, rc;
|
||||
@ -1351,8 +1442,9 @@ int hl_device_reset(struct hl_device *hdev, u32 flags)
|
||||
hard_reset = !!(flags & HL_DRV_RESET_HARD);
|
||||
from_hard_reset_thread = !!(flags & HL_DRV_RESET_FROM_RESET_THR);
|
||||
fw_reset = !!(flags & HL_DRV_RESET_BYPASS_REQ_TO_FW);
|
||||
skip_wq_flush = !!(flags & HL_DRV_RESET_DEV_RELEASE);
|
||||
from_dev_release = !!(flags & HL_DRV_RESET_DEV_RELEASE);
|
||||
delay_reset = !!(flags & HL_DRV_RESET_DELAY);
|
||||
from_watchdog_thread = !!(flags & HL_DRV_RESET_FROM_WD_THR);
|
||||
|
||||
if (!hard_reset && !hdev->asic_prop.supports_compute_reset) {
|
||||
hard_instead_soft = true;
|
||||
@ -1409,6 +1501,23 @@ int hl_device_reset(struct hl_device *hdev, u32 flags)
|
||||
|
||||
spin_unlock(&hdev->reset_info.lock);
|
||||
|
||||
/* Cancel the device release watchdog work if required.
|
||||
* In case of reset-upon-device-release while the release watchdog work is
|
||||
* scheduled, do hard-reset instead of compute-reset.
|
||||
*/
|
||||
if ((hard_reset || from_dev_release) && hdev->reset_info.watchdog_active) {
|
||||
hdev->reset_info.watchdog_active = 0;
|
||||
if (!from_watchdog_thread)
|
||||
cancel_delayed_work_sync(
|
||||
&hdev->device_release_watchdog_work.reset_work);
|
||||
|
||||
if (from_dev_release) {
|
||||
flags |= HL_DRV_RESET_HARD;
|
||||
flags &= ~HL_DRV_RESET_DEV_RELEASE;
|
||||
hard_reset = true;
|
||||
}
|
||||
}
|
||||
|
||||
if (delay_reset)
|
||||
usleep_range(HL_RESET_DELAY_USEC, HL_RESET_DELAY_USEC << 1);
|
||||
|
||||
@ -1439,13 +1548,12 @@ int hl_device_reset(struct hl_device *hdev, u32 flags)
|
||||
* Because the reset function can't run from heartbeat work,
|
||||
* we need to call the reset function from a dedicated work.
|
||||
*/
|
||||
queue_delayed_work(hdev->device_reset_work.wq,
|
||||
&hdev->device_reset_work.reset_work, 0);
|
||||
queue_delayed_work(hdev->reset_wq, &hdev->device_reset_work.reset_work, 0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
cleanup_resources(hdev, hard_reset, fw_reset, skip_wq_flush);
|
||||
cleanup_resources(hdev, hard_reset, fw_reset, from_dev_release);
|
||||
|
||||
kill_processes:
|
||||
if (hard_reset) {
|
||||
@ -1581,9 +1689,8 @@ int hl_device_reset(struct hl_device *hdev, u32 flags)
|
||||
|
||||
/* If device is not idle fail the reset process */
|
||||
if (!hdev->asic_funcs->is_device_idle(hdev, idle_mask,
|
||||
HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL)) {
|
||||
dev_err(hdev->dev, "device is not idle (mask 0x%llx_%llx) after reset\n",
|
||||
idle_mask[1], idle_mask[0]);
|
||||
HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL)) {
|
||||
print_idle_status_mask(hdev, "device is not idle after reset", idle_mask);
|
||||
rc = -EIO;
|
||||
goto out_err;
|
||||
}
|
||||
@ -1658,18 +1765,19 @@ int hl_device_reset(struct hl_device *hdev, u32 flags)
|
||||
* the device will be operational although it shouldn't be
|
||||
*/
|
||||
hdev->asic_funcs->enable_events_from_fw(hdev);
|
||||
} else if (!reset_upon_device_release) {
|
||||
hdev->reset_info.compute_reset_cnt++;
|
||||
}
|
||||
} else {
|
||||
if (!reset_upon_device_release)
|
||||
hdev->reset_info.compute_reset_cnt++;
|
||||
|
||||
if (schedule_hard_reset) {
|
||||
dev_info(hdev->dev, "Performing hard reset scheduled during compute reset\n");
|
||||
flags = hdev->reset_info.hard_reset_schedule_flags;
|
||||
hdev->reset_info.hard_reset_schedule_flags = 0;
|
||||
hdev->disabled = true;
|
||||
hard_reset = true;
|
||||
handle_reset_trigger(hdev, flags);
|
||||
goto again;
|
||||
if (schedule_hard_reset) {
|
||||
dev_info(hdev->dev, "Performing hard reset scheduled during compute reset\n");
|
||||
flags = hdev->reset_info.hard_reset_schedule_flags;
|
||||
hdev->reset_info.hard_reset_schedule_flags = 0;
|
||||
hdev->disabled = true;
|
||||
hard_reset = true;
|
||||
handle_reset_trigger(hdev, flags);
|
||||
goto again;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
@ -1706,6 +1814,73 @@ int hl_device_reset(struct hl_device *hdev, u32 flags)
|
||||
return rc;
|
||||
}
|
||||
|
||||
/*
|
||||
* hl_device_cond_reset() - conditionally reset the device.
|
||||
* @hdev: pointer to habanalabs device structure.
|
||||
* @reset_flags: reset flags.
|
||||
* @event_mask: events to notify user about.
|
||||
*
|
||||
* Conditionally reset the device, or alternatively schedule a watchdog work to reset the device
|
||||
* unless another reset precedes it.
|
||||
*/
|
||||
int hl_device_cond_reset(struct hl_device *hdev, u32 flags, u64 event_mask)
|
||||
{
|
||||
struct hl_ctx *ctx = NULL;
|
||||
|
||||
/* Device release watchdog is only for hard reset */
|
||||
if (!(flags & HL_DRV_RESET_HARD) && hdev->asic_prop.allow_inference_soft_reset)
|
||||
goto device_reset;
|
||||
|
||||
/* F/W reset cannot be postponed */
|
||||
if (flags & HL_DRV_RESET_BYPASS_REQ_TO_FW)
|
||||
goto device_reset;
|
||||
|
||||
/* Device release watchdog is relevant only if user exists and gets a reset notification */
|
||||
if (!(event_mask & HL_NOTIFIER_EVENT_DEVICE_RESET)) {
|
||||
dev_err(hdev->dev, "Resetting device without a reset indication to user\n");
|
||||
goto device_reset;
|
||||
}
|
||||
|
||||
ctx = hl_get_compute_ctx(hdev);
|
||||
if (!ctx || !ctx->hpriv->notifier_event.eventfd)
|
||||
goto device_reset;
|
||||
|
||||
/* Schedule the device release watchdog work unless reset is already in progress or if the
|
||||
* work is already scheduled.
|
||||
*/
|
||||
spin_lock(&hdev->reset_info.lock);
|
||||
if (hdev->reset_info.in_reset) {
|
||||
spin_unlock(&hdev->reset_info.lock);
|
||||
goto device_reset;
|
||||
}
|
||||
|
||||
if (hdev->reset_info.watchdog_active)
|
||||
goto out;
|
||||
|
||||
hdev->device_release_watchdog_work.flags = flags;
|
||||
dev_dbg(hdev->dev, "Device is going to be reset in %u sec unless being released\n",
|
||||
hdev->device_release_watchdog_timeout_sec);
|
||||
schedule_delayed_work(&hdev->device_release_watchdog_work.reset_work,
|
||||
msecs_to_jiffies(hdev->device_release_watchdog_timeout_sec * 1000));
|
||||
hdev->reset_info.watchdog_active = 1;
|
||||
out:
|
||||
spin_unlock(&hdev->reset_info.lock);
|
||||
|
||||
hl_notifier_event_send_all(hdev, event_mask);
|
||||
|
||||
hl_ctx_put(ctx);
|
||||
|
||||
return 0;
|
||||
|
||||
device_reset:
|
||||
if (event_mask)
|
||||
hl_notifier_event_send_all(hdev, event_mask);
|
||||
if (ctx)
|
||||
hl_ctx_put(ctx);
|
||||
|
||||
return hl_device_reset(hdev, flags);
|
||||
}
|
||||
|
||||
static void hl_notifier_event_send(struct hl_notifier_event *notifier_event, u64 event_mask)
|
||||
{
|
||||
mutex_lock(¬ifier_event->lock);
|
||||
@ -1728,6 +1903,11 @@ void hl_notifier_event_send_all(struct hl_device *hdev, u64 event_mask)
|
||||
{
|
||||
struct hl_fpriv *hpriv;
|
||||
|
||||
if (!event_mask) {
|
||||
dev_warn(hdev->dev, "Skip sending zero event");
|
||||
return;
|
||||
}
|
||||
|
||||
mutex_lock(&hdev->fpriv_list_lock);
|
||||
|
||||
list_for_each_entry(hpriv, &hdev->fpriv_list, dev_node)
|
||||
@ -1898,6 +2078,8 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
|
||||
|
||||
hdev->asic_funcs->state_dump_init(hdev);
|
||||
|
||||
hdev->device_release_watchdog_timeout_sec = HL_DEVICE_RELEASE_WATCHDOG_TIMEOUT_SEC;
|
||||
|
||||
hdev->memory_scrub_val = MEM_SCRUB_DEFAULT_VAL;
|
||||
hl_debugfs_add_device(hdev);
|
||||
|
||||
@ -2118,6 +2300,8 @@ void hl_device_fini(struct hl_device *hdev)
|
||||
}
|
||||
}
|
||||
|
||||
cancel_delayed_work_sync(&hdev->device_release_watchdog_work.reset_work);
|
||||
|
||||
/* Disable PCI access from device F/W so it won't send us additional
|
||||
* interrupts. We disable MSI/MSI-X at the halt_engines function and we
|
||||
* can't have the F/W sending us interrupts after that. We need to
|
||||
@ -2144,14 +2328,16 @@ void hl_device_fini(struct hl_device *hdev)
|
||||
*/
|
||||
dev_info(hdev->dev,
|
||||
"Waiting for all processes to exit (timeout of %u seconds)",
|
||||
HL_PENDING_RESET_LONG_SEC);
|
||||
HL_WAIT_PROCESS_KILL_ON_DEVICE_FINI);
|
||||
|
||||
rc = device_kill_open_processes(hdev, HL_PENDING_RESET_LONG_SEC, false);
|
||||
hdev->process_kill_trial_cnt = 0;
|
||||
rc = device_kill_open_processes(hdev, HL_WAIT_PROCESS_KILL_ON_DEVICE_FINI, false);
|
||||
if (rc) {
|
||||
dev_crit(hdev->dev, "Failed to kill all open processes\n");
|
||||
device_disable_open_processes(hdev, false);
|
||||
}
|
||||
|
||||
hdev->process_kill_trial_cnt = 0;
|
||||
rc = device_kill_open_processes(hdev, 0, true);
|
||||
if (rc) {
|
||||
dev_crit(hdev->dev, "Failed to kill all control device open processes\n");
|
||||
@ -2177,6 +2363,8 @@ void hl_device_fini(struct hl_device *hdev)
|
||||
|
||||
hl_mmu_fini(hdev);
|
||||
|
||||
vfree(hdev->captured_err_info.pgf_info.user_mappings);
|
||||
|
||||
hl_eq_fini(hdev, &hdev->event_queue);
|
||||
|
||||
kfree(hdev->shadow_cs_queue);
|
||||
@ -2231,3 +2419,117 @@ inline void hl_wreg(struct hl_device *hdev, u32 reg, u32 val)
|
||||
{
|
||||
writel(val, hdev->rmmio + reg);
|
||||
}
|
||||
|
||||
void hl_capture_razwi(struct hl_device *hdev, u64 addr, u16 *engine_id, u16 num_of_engines,
|
||||
u8 flags)
|
||||
{
|
||||
if (num_of_engines > HL_RAZWI_MAX_NUM_OF_ENGINES_PER_RTR) {
|
||||
dev_err(hdev->dev,
|
||||
"Number of possible razwi initiators (%u) exceeded limit (%u)\n",
|
||||
num_of_engines, HL_RAZWI_MAX_NUM_OF_ENGINES_PER_RTR);
|
||||
return;
|
||||
}
|
||||
|
||||
/* In case it's the first razwi since the device was opened, capture its parameters */
|
||||
if (atomic_cmpxchg(&hdev->captured_err_info.razwi_info_recorded, 0, 1))
|
||||
return;
|
||||
|
||||
hdev->captured_err_info.razwi.timestamp = ktime_to_ns(ktime_get());
|
||||
hdev->captured_err_info.razwi.addr = addr;
|
||||
hdev->captured_err_info.razwi.num_of_possible_engines = num_of_engines;
|
||||
memcpy(&hdev->captured_err_info.razwi.engine_id[0], &engine_id[0],
|
||||
num_of_engines * sizeof(u16));
|
||||
hdev->captured_err_info.razwi.flags = flags;
|
||||
}
|
||||
|
||||
void hl_handle_razwi(struct hl_device *hdev, u64 addr, u16 *engine_id, u16 num_of_engines,
|
||||
u8 flags, u64 *event_mask)
|
||||
{
|
||||
hl_capture_razwi(hdev, addr, engine_id, num_of_engines, flags);
|
||||
|
||||
if (event_mask)
|
||||
*event_mask |= HL_NOTIFIER_EVENT_RAZWI;
|
||||
}
|
||||
|
||||
static void hl_capture_user_mappings(struct hl_device *hdev, bool is_pmmu)
|
||||
{
|
||||
struct page_fault_info *pgf_info = &hdev->captured_err_info.pgf_info;
|
||||
struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
|
||||
struct hl_vm_hash_node *hnode;
|
||||
struct hl_userptr *userptr;
|
||||
enum vm_type *vm_type;
|
||||
struct hl_ctx *ctx;
|
||||
u32 map_idx = 0;
|
||||
int i;
|
||||
|
||||
/* Reset previous session count*/
|
||||
pgf_info->num_of_user_mappings = 0;
|
||||
|
||||
ctx = hl_get_compute_ctx(hdev);
|
||||
if (!ctx) {
|
||||
dev_err(hdev->dev, "Can't get user context for user mappings\n");
|
||||
return;
|
||||
}
|
||||
|
||||
mutex_lock(&ctx->mem_hash_lock);
|
||||
hash_for_each(ctx->mem_hash, i, hnode, node) {
|
||||
vm_type = hnode->ptr;
|
||||
if (((*vm_type == VM_TYPE_USERPTR) && is_pmmu) ||
|
||||
((*vm_type == VM_TYPE_PHYS_PACK) && !is_pmmu))
|
||||
pgf_info->num_of_user_mappings++;
|
||||
|
||||
}
|
||||
|
||||
if (!pgf_info->num_of_user_mappings)
|
||||
goto finish;
|
||||
|
||||
/* In case we already allocated in previous session, need to release it before
|
||||
* allocating new buffer.
|
||||
*/
|
||||
vfree(pgf_info->user_mappings);
|
||||
pgf_info->user_mappings =
|
||||
vzalloc(pgf_info->num_of_user_mappings * sizeof(struct hl_user_mapping));
|
||||
if (!pgf_info->user_mappings) {
|
||||
pgf_info->num_of_user_mappings = 0;
|
||||
goto finish;
|
||||
}
|
||||
|
||||
hash_for_each(ctx->mem_hash, i, hnode, node) {
|
||||
vm_type = hnode->ptr;
|
||||
if ((*vm_type == VM_TYPE_USERPTR) && (is_pmmu)) {
|
||||
userptr = hnode->ptr;
|
||||
pgf_info->user_mappings[map_idx].dev_va = hnode->vaddr;
|
||||
pgf_info->user_mappings[map_idx].size = userptr->size;
|
||||
map_idx++;
|
||||
} else if ((*vm_type == VM_TYPE_PHYS_PACK) && (!is_pmmu)) {
|
||||
phys_pg_pack = hnode->ptr;
|
||||
pgf_info->user_mappings[map_idx].dev_va = hnode->vaddr;
|
||||
pgf_info->user_mappings[map_idx].size = phys_pg_pack->total_size;
|
||||
map_idx++;
|
||||
}
|
||||
}
|
||||
finish:
|
||||
mutex_unlock(&ctx->mem_hash_lock);
|
||||
hl_ctx_put(ctx);
|
||||
}
|
||||
|
||||
void hl_capture_page_fault(struct hl_device *hdev, u64 addr, u16 eng_id, bool is_pmmu)
|
||||
{
|
||||
/* Capture only the first page fault */
|
||||
if (atomic_cmpxchg(&hdev->captured_err_info.pgf_info_recorded, 0, 1))
|
||||
return;
|
||||
|
||||
hdev->captured_err_info.pgf_info.pgf.timestamp = ktime_to_ns(ktime_get());
|
||||
hdev->captured_err_info.pgf_info.pgf.addr = addr;
|
||||
hdev->captured_err_info.pgf_info.pgf.engine_id = eng_id;
|
||||
hl_capture_user_mappings(hdev, is_pmmu);
|
||||
}
|
||||
|
||||
void hl_handle_page_fault(struct hl_device *hdev, u64 addr, u16 eng_id, bool is_pmmu,
|
||||
u64 *event_mask)
|
||||
{
|
||||
hl_capture_page_fault(hdev, addr, eng_id, is_pmmu);
|
||||
|
||||
if (event_mask)
|
||||
*event_mask |= HL_NOTIFIER_EVENT_PAGE_FAULT;
|
||||
}
|
||||
|
@ -12,6 +12,7 @@
|
||||
#include <linux/crc32.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/ctype.h>
|
||||
#include <linux/vmalloc.h>
|
||||
|
||||
#define FW_FILE_MAX_SIZE 0x1400000 /* maximum size of 20MB */
|
||||
|
||||
@ -323,6 +324,7 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
|
||||
|
||||
if (!prop->supports_advanced_cpucp_rc) {
|
||||
dev_dbg(hdev->dev, "F/W ERROR %d for CPU packet %d\n", rc, opcode);
|
||||
rc = -EIO;
|
||||
goto scrub_descriptor;
|
||||
}
|
||||
|
||||
@ -615,16 +617,12 @@ static bool fw_report_boot_dev0(struct hl_device *hdev, u32 err_val,
|
||||
if (sts_val & CPU_BOOT_DEV_STS0_ENABLED)
|
||||
dev_dbg(hdev->dev, "Device status0 %#x\n", sts_val);
|
||||
|
||||
/* All warnings should go here in order not to reach the unknown error validation */
|
||||
if (err_val & CPU_BOOT_ERR0_EEPROM_FAIL) {
|
||||
dev_warn(hdev->dev,
|
||||
"Device boot warning - EEPROM failure detected, default settings applied\n");
|
||||
/* This is a warning so we don't want it to disable the
|
||||
* device
|
||||
*/
|
||||
err_val &= ~CPU_BOOT_ERR0_EEPROM_FAIL;
|
||||
dev_err(hdev->dev, "Device boot error - EEPROM failure detected\n");
|
||||
err_exists = true;
|
||||
}
|
||||
|
||||
/* All warnings should go here in order not to reach the unknown error validation */
|
||||
if (err_val & CPU_BOOT_ERR0_DRAM_SKIPPED) {
|
||||
dev_warn(hdev->dev,
|
||||
"Device boot warning - Skipped DRAM initialization\n");
|
||||
@ -1782,6 +1780,8 @@ int hl_fw_dynamic_send_protocol_cmd(struct hl_device *hdev,
|
||||
|
||||
/* first send clear command to clean former commands */
|
||||
rc = hl_fw_dynamic_send_clear_cmd(hdev, fw_loader);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
/* send the actual command */
|
||||
hl_fw_dynamic_send_cmd(hdev, fw_loader, cmd, size);
|
||||
@ -1988,10 +1988,11 @@ static int hl_fw_dynamic_read_and_validate_descriptor(struct hl_device *hdev,
|
||||
struct fw_load_mgr *fw_loader)
|
||||
{
|
||||
struct lkd_fw_comms_desc *fw_desc;
|
||||
void __iomem *src, *temp_fw_desc;
|
||||
struct pci_mem_region *region;
|
||||
struct fw_response *response;
|
||||
u16 fw_data_size;
|
||||
enum pci_region region_id;
|
||||
void __iomem *src;
|
||||
int rc;
|
||||
|
||||
fw_desc = &fw_loader->dynamic_loader.comm_desc;
|
||||
@ -2018,9 +2019,29 @@ static int hl_fw_dynamic_read_and_validate_descriptor(struct hl_device *hdev,
|
||||
fw_loader->dynamic_loader.fw_desc_valid = false;
|
||||
 	src = hdev->pcie_bar[region->bar_id] + region->offset_in_bar +
 			response->ram_offset;
-	memcpy_fromio(fw_desc, src, sizeof(struct lkd_fw_comms_desc));
-
-	return hl_fw_dynamic_validate_descriptor(hdev, fw_loader, fw_desc);
+	/*
+	 * We do the copy of the fw descriptor in 2 phases:
+	 * 1. copy the header + data info according to our lkd_fw_comms_desc definition.
+	 *    then we're able to read the actual data size provided by fw.
+	 *    this is needed for cases where data in descriptor was changed(add/remove)
+	 *    in embedded specs header file before updating lkd copy of the header file
+	 * 2. copy descriptor to temporary buffer with aligned size and send it to validation
+	 */
+	memcpy_fromio(fw_desc, src, sizeof(struct lkd_fw_comms_desc));
+	fw_data_size = le16_to_cpu(fw_desc->header.size);
+
+	temp_fw_desc = vzalloc(sizeof(struct comms_desc_header) + fw_data_size);
+	if (!temp_fw_desc)
+		return -ENOMEM;
+
+	memcpy_fromio(temp_fw_desc, src, sizeof(struct comms_desc_header) + fw_data_size);
+
+	rc = hl_fw_dynamic_validate_descriptor(hdev, fw_loader,
+						(struct lkd_fw_comms_desc *) temp_fw_desc);
+	vfree(temp_fw_desc);
+
+	return rc;
 }
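A user-space model of the two-phase copy described in the comment above: read a fixed header first to learn the payload size, then copy header plus payload into a buffer of exactly that size. The structure and names below are illustrative only, not the driver's types.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative fixed header that carries the size of the variable payload
 * following it (the driver reads comms_desc_header.size the same way).
 */
struct demo_desc_header {
	uint16_t size;		/* payload bytes after the header */
	uint16_t version;
};

/* Phase 1: copy just the header to learn 'size'.
 * Phase 2: allocate header + size bytes and copy the full descriptor.
 */
static void *read_descriptor(const void *io_src, size_t *out_len)
{
	struct demo_desc_header hdr;
	void *buf;

	memcpy(&hdr, io_src, sizeof(hdr));		/* phase 1 */

	*out_len = sizeof(hdr) + hdr.size;
	buf = calloc(1, *out_len);
	if (!buf)
		return NULL;

	memcpy(buf, io_src, *out_len);			/* phase 2 */
	return buf;
}

int main(void)
{
	struct {
		struct demo_desc_header hdr;
		unsigned char payload[32];
	} blob = { .hdr = { .size = 8, .version = 1 } };
	size_t len;
	void *copy = read_descriptor(&blob, &len);	/* len == sizeof(hdr) + 8 */

	free(copy);
	return 0;
}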
|
||||
|
||||
/**
|
||||
@ -2507,7 +2528,7 @@ static int hl_fw_dynamic_init_cpu(struct hl_device *hdev,
|
||||
struct fw_load_mgr *fw_loader)
|
||||
{
|
||||
struct cpu_dyn_regs *dyn_regs;
|
||||
int rc;
|
||||
int rc, fw_error_rc;
|
||||
|
||||
dev_info(hdev->dev,
|
||||
"Loading %sfirmware to device, may take some time...\n",
|
||||
@ -2607,14 +2628,17 @@ static int hl_fw_dynamic_init_cpu(struct hl_device *hdev,
|
||||
|
||||
hl_fw_dynamic_update_linux_interrupt_if(hdev);
|
||||
|
||||
return 0;
|
||||
|
||||
protocol_err:
|
||||
if (fw_loader->dynamic_loader.fw_desc_valid)
|
||||
fw_read_errors(hdev, le32_to_cpu(dyn_regs->cpu_boot_err0),
|
||||
if (fw_loader->dynamic_loader.fw_desc_valid) {
|
||||
fw_error_rc = fw_read_errors(hdev, le32_to_cpu(dyn_regs->cpu_boot_err0),
|
||||
le32_to_cpu(dyn_regs->cpu_boot_err1),
|
||||
le32_to_cpu(dyn_regs->cpu_boot_dev_sts0),
|
||||
le32_to_cpu(dyn_regs->cpu_boot_dev_sts1));
|
||||
|
||||
if (fw_error_rc)
|
||||
return fw_error_rc;
|
||||
}
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
@ -2983,7 +3007,7 @@ static int hl_fw_get_sec_attest_data(struct hl_device *hdev, u32 packet_id, void
|
||||
int rc;
|
||||
|
||||
req_cpu_addr = hl_cpu_accessible_dma_pool_alloc(hdev, size, &req_dma_addr);
|
||||
if (!data) {
|
||||
if (!req_cpu_addr) {
|
||||
dev_err(hdev->dev,
|
||||
"Failed to allocate DMA memory for CPU-CP packet %u\n", packet_id);
|
||||
return -ENOMEM;
|
||||
|
@ -50,9 +50,14 @@ struct hl_fpriv;
|
||||
#define HL_MMAP_OFFSET_VALUE_MASK (0x1FFFFFFFFFFFull >> PAGE_SHIFT)
|
||||
#define HL_MMAP_OFFSET_VALUE_GET(off) (off & HL_MMAP_OFFSET_VALUE_MASK)
|
||||
|
||||
#define HL_PENDING_RESET_PER_SEC 10
|
||||
#define HL_PENDING_RESET_MAX_TRIALS 60 /* 10 minutes */
|
||||
#define HL_PENDING_RESET_LONG_SEC 60
|
||||
#define HL_PENDING_RESET_PER_SEC 10
|
||||
#define HL_PENDING_RESET_MAX_TRIALS 60 /* 10 minutes */
|
||||
#define HL_PENDING_RESET_LONG_SEC 60
|
||||
/*
|
||||
* In device fini, wait 10 minutes for user processes to be terminated after we kill them.
|
||||
* This is needed to prevent situation of clearing resources while user processes are still alive.
|
||||
*/
|
||||
#define HL_WAIT_PROCESS_KILL_ON_DEVICE_FINI 600
|
||||
|
||||
#define HL_HARD_RESET_MAX_TIMEOUT 120
|
||||
#define HL_PLDM_HARD_RESET_MAX_TIMEOUT (HL_HARD_RESET_MAX_TIMEOUT * 3)
|
||||
@ -191,6 +196,9 @@ enum hl_mmu_enablement {
|
||||
*
|
||||
* - HL_DRV_RESET_DELAY
|
||||
* Set if a delay should be added before the reset
|
||||
*
|
||||
* - HL_DRV_RESET_FROM_WD_THR
|
||||
* Set if the caller is the device release watchdog thread
|
||||
*/
|
||||
|
||||
#define HL_DRV_RESET_HARD (1 << 0)
|
||||
@ -201,6 +209,7 @@ enum hl_mmu_enablement {
|
||||
#define HL_DRV_RESET_BYPASS_REQ_TO_FW (1 << 5)
|
||||
#define HL_DRV_RESET_FW_FATAL_ERR (1 << 6)
|
||||
#define HL_DRV_RESET_DELAY (1 << 7)
|
||||
#define HL_DRV_RESET_FROM_WD_THR (1 << 8)
|
||||
|
||||
/*
|
||||
* Security
|
||||
@ -1188,7 +1197,7 @@ struct hl_dec {
|
||||
* @ASIC_GAUDI: Gaudi device (HL-2000).
|
||||
* @ASIC_GAUDI_SEC: Gaudi secured device (HL-2000).
|
||||
* @ASIC_GAUDI2: Gaudi2 device.
|
||||
* @ASIC_GAUDI2_SEC: Gaudi2 secured device.
|
||||
* @ASIC_GAUDI2B: Gaudi2B device.
|
||||
*/
|
||||
enum hl_asic_type {
|
||||
ASIC_INVALID,
|
||||
@ -1196,7 +1205,7 @@ enum hl_asic_type {
|
||||
ASIC_GAUDI,
|
||||
ASIC_GAUDI_SEC,
|
||||
ASIC_GAUDI2,
|
||||
ASIC_GAUDI2_SEC,
|
||||
ASIC_GAUDI2B,
|
||||
};
|
||||
|
||||
struct hl_cs_parser;
|
||||
@ -2489,13 +2498,9 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
|
||||
#define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
|
||||
#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
|
||||
|
||||
-#define RMWREG32(reg, val, mask)					\
-	do {								\
-		u32 tmp_ = RREG32(reg);					\
-		tmp_ &= ~(mask);					\
-		tmp_ |= ((val) << __ffs(mask));				\
-		WREG32(reg, tmp_);					\
-	} while (0)
+#define RMWREG32_SHIFTED(reg, val, mask) WREG32_P(reg, val, ~(mask))
+
+#define RMWREG32(reg, val, mask) RMWREG32_SHIFTED(reg, (val) << __ffs(mask), mask)
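For readers unfamiliar with the pattern, a stand-alone C sketch of the read-modify-write-within-a-mask operation that RMWREG32() performs on a register; __builtin_ctz stands in for the kernel's __ffs and a plain variable stands in for the register.

#include <stdint.h>
#include <stdio.h>

/* Write 'val' into the bit-field selected by 'mask', leaving all other bits
 * untouched - the same effect RMWREG32(reg, val, mask) has on a register.
 */
static void rmw32(uint32_t *reg, uint32_t val, uint32_t mask)
{
	uint32_t tmp = *reg;

	tmp &= ~mask;					/* clear the field */
	tmp |= (val << __builtin_ctz(mask)) & mask;	/* place the new value */
	*reg = tmp;
}

int main(void)
{
	uint32_t reg = 0xffff0000;

	rmw32(&reg, 0x5, 0x00000f00);	/* set bits [11:8] to 5 */
	printf("reg = 0x%08x\n", reg);	/* -> 0xffff0500 */
	return 0;
}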
|
||||
|
||||
#define RREG32_MASK(reg, mask) ((RREG32(reg) & mask) >> __ffs(mask))
|
||||
|
||||
@ -2528,7 +2533,7 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
|
||||
break; \
|
||||
(val) = __elbi_read; \
|
||||
} else {\
|
||||
(val) = RREG32((u32)(addr)); \
|
||||
(val) = RREG32(lower_32_bits(addr)); \
|
||||
} \
|
||||
if (cond) \
|
||||
break; \
|
||||
@ -2539,7 +2544,7 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
|
||||
break; \
|
||||
(val) = __elbi_read; \
|
||||
} else {\
|
||||
(val) = RREG32((u32)(addr)); \
|
||||
(val) = RREG32(lower_32_bits(addr)); \
|
||||
} \
|
||||
break; \
|
||||
} \
|
||||
@ -2594,7 +2599,7 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
|
||||
if (__rc) \
|
||||
break; \
|
||||
} else { \
|
||||
__read_val = RREG32((u32)(addr_arr)[__arr_idx]); \
|
||||
__read_val = RREG32(lower_32_bits(addr_arr[__arr_idx])); \
|
||||
} \
|
||||
if (__read_val == (expected_val)) \
|
||||
__elem_bitmask &= ~BIT_ULL(__arr_idx); \
|
||||
@ -2682,17 +2687,15 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
|
||||
struct hwmon_chip_info;
|
||||
|
||||
/**
|
||||
* struct hl_device_reset_work - reset workqueue task wrapper.
|
||||
* @wq: work queue for device reset procedure.
|
||||
* struct hl_device_reset_work - reset work wrapper.
|
||||
* @reset_work: reset work to be done.
|
||||
* @hdev: habanalabs device structure.
|
||||
* @flags: reset flags.
|
||||
*/
|
||||
struct hl_device_reset_work {
|
||||
struct workqueue_struct *wq;
|
||||
struct delayed_work reset_work;
|
||||
struct hl_device *hdev;
|
||||
u32 flags;
|
||||
struct delayed_work reset_work;
|
||||
struct hl_device *hdev;
|
||||
u32 flags;
|
||||
};
|
||||
|
||||
/**
|
||||
@ -2811,7 +2814,7 @@ struct hl_mmu_funcs {
|
||||
|
||||
/**
|
||||
* struct hl_prefetch_work - prefetch work structure handler
|
||||
* @pf_work: actual work struct.
|
||||
* @prefetch_work: actual work struct.
|
||||
* @ctx: compute context.
|
||||
* @va: virtual address to pre-fetch.
|
||||
* @size: pre-fetch size.
|
||||
@ -2819,7 +2822,7 @@ struct hl_mmu_funcs {
|
||||
* @asid: ASID for maintenance operation.
|
||||
*/
|
||||
struct hl_prefetch_work {
|
||||
struct work_struct pf_work;
|
||||
struct work_struct prefetch_work;
|
||||
struct hl_ctx *ctx;
|
||||
u64 va;
|
||||
u64 size;
|
||||
@ -2925,30 +2928,6 @@ struct cs_timeout_info {
|
||||
u64 seq;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct razwi_info - info about last razwi error occurred.
|
||||
* @timestamp: razwi timestamp.
|
||||
* @write_enable: if set writing to razwi parameters in the structure is enabled.
|
||||
* otherwise - disabled, so the first (root cause) razwi will not be overwritten.
|
||||
* @addr: address that caused razwi.
|
||||
* @engine_id_1: engine id of the razwi initiator, if it was initiated by engine that does
|
||||
* not have engine id it will be set to U16_MAX.
|
||||
* @engine_id_2: second engine id of razwi initiator. Might happen that razwi have 2 possible
|
||||
* engines which one them caused the razwi. In that case, it will contain the
|
||||
* second possible engine id, otherwise it will be set to U16_MAX.
|
||||
* @non_engine_initiator: in case the initiator of the razwi does not have engine id.
|
||||
* @type: cause of razwi, page fault or access error, otherwise it will be set to U8_MAX.
|
||||
*/
|
||||
struct razwi_info {
|
||||
ktime_t timestamp;
|
||||
atomic_t write_enable;
|
||||
u64 addr;
|
||||
u16 engine_id_1;
|
||||
u16 engine_id_2;
|
||||
u8 non_engine_initiator;
|
||||
u8 type;
|
||||
};
|
||||
|
||||
#define MAX_QMAN_STREAMS_INFO 4
|
||||
#define OPCODE_INFO_MAX_ADDR_SIZE 8
|
||||
/**
|
||||
@ -2981,16 +2960,38 @@ struct undefined_opcode_info {
|
||||
bool write_enable;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct page_fault_info - info about page fault
|
||||
* @pgf_info: page fault information.
|
||||
* @user_mappings: buffer containing user mappings.
|
||||
* @num_of_user_mappings: number of user mappings.
|
||||
*/
|
||||
struct page_fault_info {
|
||||
struct hl_page_fault_info pgf;
|
||||
struct hl_user_mapping *user_mappings;
|
||||
u64 num_of_user_mappings;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct hl_error_info - holds information collected during an error.
|
||||
* @cs_timeout: CS timeout error information.
|
||||
* @razwi: razwi information.
|
||||
* @razwi_info_recorded: if set writing to razwi information is enabled.
|
||||
* otherwise - disabled, so the first (root cause) razwi will not be
|
||||
* overwritten.
|
||||
* @undef_opcode: undefined opcode information
|
||||
* @pgf_info: page fault information.
|
||||
* @pgf_info_recorded: if set writing to page fault information is enabled.
|
||||
* otherwise - disabled, so the first (root cause) page fault will not be
|
||||
* overwritten.
|
||||
*/
|
||||
struct hl_error_info {
|
||||
struct cs_timeout_info cs_timeout;
|
||||
struct razwi_info razwi;
|
||||
struct hl_info_razwi_event razwi;
|
||||
atomic_t razwi_info_recorded;
|
||||
struct undefined_opcode_info undef_opcode;
|
||||
struct page_fault_info pgf_info;
|
||||
atomic_t pgf_info_recorded;
|
||||
};

/**
@@ -3013,6 +3014,7 @@ struct hl_error_info {
* same cause.
* @skip_reset_on_timeout: Skip device reset if CS has timed out, wait for it to
* complete instead.
* @watchdog_active: true if a device release watchdog work is scheduled.
*/
struct hl_reset_info {
spinlock_t lock;
@@ -3023,12 +3025,11 @@ struct hl_reset_info {
u8 in_compute_reset;
u8 needs_reset;
u8 hard_reset_pending;

u8 curr_reset_cause;
u8 prev_reset_trigger;
u8 reset_trigger_repeated;

u8 skip_reset_on_timeout;
u8 watchdog_active;
};

/**
@@ -3044,6 +3045,8 @@ struct hl_reset_info {
* @dev_ctrl: related kernel device structure for the control device
* @work_heartbeat: delayed work for CPU-CP is-alive check.
* @device_reset_work: delayed work which performs hard reset
* @device_release_watchdog_work: watchdog work that performs hard reset if user doesn't release
* device upon certain error cases.
* @asic_name: ASIC specific name.
* @asic_type: ASIC specific type.
* @completion_queue: array of hl_cq.
@@ -3062,7 +3065,8 @@ struct hl_reset_info {
* @cs_cmplt_wq: work queue of CS completions for executing work in process
* context.
* @ts_free_obj_wq: work queue for timestamp registration objects release.
* @pf_wq: work queue for MMU pre-fetch operations.
* @prefetch_wq: work queue for MMU pre-fetch operations.
* @reset_wq: work queue for device reset procedure.
* @kernel_ctx: Kernel driver context structure.
* @kernel_queues: array of hl_hw_queue.
* @cs_mirror_list: CS mirror list for TDR.
@@ -3152,6 +3156,7 @@ struct hl_reset_info {
* indicates which decoder engines are binned-out
* @edma_binning: contains mask of edma engines that is received from the f/w which
* indicates which edma engines are binned-out
* @device_release_watchdog_timeout_sec: device release watchdog timeout value in seconds.
* @id: device minor.
* @id_control: minor of the control device.
* @cdev_idx: char device index. Used for setting its name.
@@ -3221,6 +3226,7 @@ struct hl_device {
struct device *dev_ctrl;
struct delayed_work work_heartbeat;
struct hl_device_reset_work device_reset_work;
struct hl_device_reset_work device_release_watchdog_work;
char asic_name[HL_STR_MAX];
char status[HL_DEV_STS_MAX][HL_STR_MAX];
enum hl_asic_type asic_type;
@@ -3233,7 +3239,8 @@ struct hl_device {
struct workqueue_struct *eq_wq;
struct workqueue_struct *cs_cmplt_wq;
struct workqueue_struct *ts_free_obj_wq;
struct workqueue_struct *pf_wq;
struct workqueue_struct *prefetch_wq;
struct workqueue_struct *reset_wq;
struct hl_ctx *kernel_ctx;
struct hl_hw_queue *kernel_queues;
struct list_head cs_mirror_list;
@@ -3314,6 +3321,7 @@ struct hl_device {
u32 high_pll;
u32 decoder_binning;
u32 edma_binning;
u32 device_release_watchdog_timeout_sec;
u16 id;
u16 id_control;
u16 cdev_idx;
@@ -3488,6 +3496,8 @@ void hl_asic_dma_pool_free_caller(struct hl_device *hdev, void *vaddr, dma_addr_
int hl_dma_map_sgtable(struct hl_device *hdev, struct sg_table *sgt, enum dma_data_direction dir);
void hl_dma_unmap_sgtable(struct hl_device *hdev, struct sg_table *sgt,
enum dma_data_direction dir);
int hl_access_sram_dram_region(struct hl_device *hdev, u64 addr, u64 *val,
enum debugfs_access_type acc_type, enum pci_region region_type, bool set_dram_bar);
int hl_access_cfg_region(struct hl_device *hdev, u64 addr, u64 *val,
enum debugfs_access_type acc_type);
int hl_access_dev_mem(struct hl_device *hdev, enum pci_region region_type,
@@ -3496,6 +3506,8 @@ int hl_device_open(struct inode *inode, struct file *filp);
int hl_device_open_ctrl(struct inode *inode, struct file *filp);
bool hl_device_operational(struct hl_device *hdev,
enum hl_device_status *status);
bool hl_ctrl_device_operational(struct hl_device *hdev,
enum hl_device_status *status);
enum hl_device_status hl_device_status(struct hl_device *hdev);
int hl_device_set_debug_mode(struct hl_device *hdev, struct hl_ctx *ctx, bool enable);
int hl_hw_queues_create(struct hl_device *hdev);
@@ -3549,6 +3561,7 @@ void hl_device_fini(struct hl_device *hdev);
int hl_device_suspend(struct hl_device *hdev);
int hl_device_resume(struct hl_device *hdev);
int hl_device_reset(struct hl_device *hdev, u32 flags);
int hl_device_cond_reset(struct hl_device *hdev, u32 flags, u64 event_mask);
void hl_hpriv_get(struct hl_fpriv *hpriv);
int hl_hpriv_put(struct hl_fpriv *hpriv);
int hl_device_utilization(struct hl_device *hdev, u32 *utilization);
@@ -3762,7 +3775,8 @@ void hl_sysfs_add_dev_vrm_attr(struct hl_device *hdev, struct attribute_group *d

void hw_sob_get(struct hl_hw_sob *hw_sob);
void hw_sob_put(struct hl_hw_sob *hw_sob);
void hl_encaps_handle_do_release(struct kref *ref);
void hl_encaps_release_handle_and_put_ctx(struct kref *ref);
void hl_encaps_release_handle_and_put_sob_ctx(struct kref *ref);
void hl_hw_queue_encaps_sig_set_sob_info(struct hl_device *hdev,
struct hl_cs *cs, struct hl_cs_job *job,
struct hl_cs_compl *cs_cmpl);
@@ -3798,6 +3812,13 @@ hl_mmap_mem_buf_alloc(struct hl_mem_mgr *mmg,
struct hl_mmap_mem_buf_behavior *behavior, gfp_t gfp,
void *args);
__printf(2, 3) void hl_engine_data_sprintf(struct engines_data *e, const char *fmt, ...);
void hl_capture_razwi(struct hl_device *hdev, u64 addr, u16 *engine_id, u16 num_of_engines,
u8 flags);
void hl_handle_razwi(struct hl_device *hdev, u64 addr, u16 *engine_id, u16 num_of_engines,
u8 flags, u64 *event_mask);
void hl_capture_page_fault(struct hl_device *hdev, u64 addr, u16 eng_id, bool is_pmmu);
void hl_handle_page_fault(struct hl_device *hdev, u64 addr, u16 eng_id, bool is_pmmu,
u64 *event_mask);
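
The prototypes above split error reporting into a capture step (store the root cause in captured_err_info) and a handle step (capture plus raising the matching notifier bit). A plausible sketch of how such a wrapper composes the two, assuming kernel context; this is an illustration, not the driver's verbatim implementation:

void hl_handle_page_fault(struct hl_device *hdev, u64 addr, u16 eng_id, bool is_pmmu,
				u64 *event_mask)
{
	/* record the first (root cause) page fault for later retrieval via the INFO ioctl */
	hl_capture_page_fault(hdev, addr, eng_id, is_pmmu);

	/* let eventfd listeners know a page fault occurred */
	if (event_mask)
		*event_mask |= HL_NOTIFIER_EVENT_PAGE_FAULT;
}
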

#ifdef CONFIG_DEBUG_FS

@@ -9,6 +9,7 @@
#define pr_fmt(fmt) "habanalabs: " fmt

#include "habanalabs.h"
#include "../include/hw_ip/pci/pci_general.h"

#include <linux/pci.h>
#include <linux/aer.h>
@@ -74,16 +75,17 @@ MODULE_DEVICE_TABLE(pci, ids);
/*
* get_asic_type - translate device id to asic type
*
* @device: id of the PCI device
* @hdev: pointer to habanalabs device structure.
*
* Translate device id to asic type.
* Translate device id and revision id to asic type.
* In case of unidentified device, return -1
*/
static enum hl_asic_type get_asic_type(u16 device)
static enum hl_asic_type get_asic_type(struct hl_device *hdev)
{
enum hl_asic_type asic_type;
struct pci_dev *pdev = hdev->pdev;
enum hl_asic_type asic_type = ASIC_INVALID;

switch (device) {
switch (pdev->device) {
case PCI_IDS_GOYA:
asic_type = ASIC_GOYA;
break;
@@ -94,10 +96,18 @@ static enum hl_asic_type get_asic_type(u16 device)
asic_type = ASIC_GAUDI_SEC;
break;
case PCI_IDS_GAUDI2:
asic_type = ASIC_GAUDI2;
switch (pdev->revision) {
case REV_ID_A:
asic_type = ASIC_GAUDI2;
break;
case REV_ID_B:
asic_type = ASIC_GAUDI2B;
break;
default:
break;
}
break;
default:
asic_type = ASIC_INVALID;
break;
}

@@ -212,7 +222,8 @@ int hl_device_open(struct inode *inode, struct file *filp)
hl_debugfs_add_file(hpriv);

atomic_set(&hdev->captured_err_info.cs_timeout.write_enable, 1);
atomic_set(&hdev->captured_err_info.razwi.write_enable, 1);
atomic_set(&hdev->captured_err_info.razwi_info_recorded, 0);
atomic_set(&hdev->captured_err_info.pgf_info_recorded, 0);
hdev->captured_err_info.undef_opcode.write_enable = true;

hdev->open_counter++;
@@ -270,9 +281,9 @@ int hl_device_open_ctrl(struct inode *inode, struct file *filp)

mutex_lock(&hdev->fpriv_ctrl_list_lock);

if (!hl_device_operational(hdev, NULL)) {
if (!hl_ctrl_device_operational(hdev, NULL)) {
dev_dbg_ratelimited(hdev->dev_ctrl,
"Can't open %s because it is disabled or in reset\n",
"Can't open %s because it is disabled\n",
dev_name(hdev->dev_ctrl));
rc = -EPERM;
goto out_err;
@@ -415,7 +426,7 @@ static int create_hdev(struct hl_device **dev, struct pci_dev *pdev)
/* First, we must find out which ASIC are we handling. This is needed
* to configure the behavior of the driver (kernel parameters)
*/
hdev->asic_type = get_asic_type(pdev->device);
hdev->asic_type = get_asic_type(hdev);
if (hdev->asic_type == ASIC_INVALID) {
dev_err(&pdev->dev, "Unsupported ASIC\n");
rc = -ENODEV;
@@ -594,15 +605,16 @@ hl_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t state)

switch (state) {
case pci_channel_io_normal:
dev_warn(hdev->dev, "PCI normal state error detected\n");
return PCI_ERS_RESULT_CAN_RECOVER;

case pci_channel_io_frozen:
dev_warn(hdev->dev, "frozen state error detected\n");
dev_warn(hdev->dev, "PCI frozen state error detected\n");
result = PCI_ERS_RESULT_NEED_RESET;
break;

case pci_channel_io_perm_failure:
dev_warn(hdev->dev, "failure state error detected\n");
dev_warn(hdev->dev, "PCI failure state error detected\n");
result = PCI_ERS_RESULT_DISCONNECT;
break;

@@ -638,6 +650,10 @@ static void hl_pci_err_resume(struct pci_dev *pdev)
*/
static pci_ers_result_t hl_pci_err_slot_reset(struct pci_dev *pdev)
{
struct hl_device *hdev = pci_get_drvdata(pdev);

dev_warn(hdev->dev, "PCI slot reset detected\n");

return PCI_ERS_RESULT_RECOVERED;
}

@@ -10,10 +10,11 @@
#include <uapi/misc/habanalabs.h>
#include "habanalabs.h"

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>

static u32 hl_debug_struct_size[HL_DEBUG_OP_TIMESTAMP + 1] = {
@@ -105,6 +106,7 @@ static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
hw_ip.edma_enabled_mask = prop->edma_enabled_mask;
hw_ip.server_type = prop->server_type;
hw_ip.security_enabled = prop->fw_security_enabled;
hw_ip.revision_id = hdev->pdev->revision;

return copy_to_user(out, &hw_ip,
min((size_t) size, sizeof(hw_ip))) ? -EFAULT : 0;
@@ -121,6 +123,10 @@ static int hw_events_info(struct hl_device *hdev, bool aggregate,
return -EINVAL;

arr = hdev->asic_funcs->get_events_stat(hdev, aggregate, &size);
if (!arr) {
dev_err(hdev->dev, "Events info not supported\n");
return -EOPNOTSUPP;
}

return copy_to_user(out, arr, min(max_size, size)) ? -EFAULT : 0;
}
@@ -603,20 +609,14 @@ static int razwi_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
struct hl_device *hdev = hpriv->hdev;
u32 max_size = args->return_size;
struct hl_info_razwi_event info = {0};
struct hl_info_razwi_event *info = &hdev->captured_err_info.razwi;
void __user *out = (void __user *) (uintptr_t) args->return_pointer;

if ((!max_size) || (!out))
return -EINVAL;

info.timestamp = ktime_to_ns(hdev->captured_err_info.razwi.timestamp);
info.addr = hdev->captured_err_info.razwi.addr;
info.engine_id_1 = hdev->captured_err_info.razwi.engine_id_1;
info.engine_id_2 = hdev->captured_err_info.razwi.engine_id_2;
info.no_engine_id = hdev->captured_err_info.razwi.non_engine_initiator;
info.error_type = hdev->captured_err_info.razwi.type;

return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
return copy_to_user(out, info, min_t(size_t, max_size, sizeof(struct hl_info_razwi_event)))
? -EFAULT : 0;
}
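
With this change the handler copies the captured hl_info_razwi_event straight from captured_err_info to the user buffer. From userspace the data is reached through the INFO ioctl; a hedged sketch, assuming an already-open device fd and the HL_IOCTL_INFO/HL_INFO_RAZWI_EVENT definitions from the installed uapi header (exact opcode values depend on the kernel headers in use):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <misc/habanalabs.h>

static int read_last_razwi(int fd, struct hl_info_razwi_event *ev)
{
	struct hl_info_args args;

	memset(&args, 0, sizeof(args));
	args.op = HL_INFO_RAZWI_EVENT;
	args.return_pointer = (__u64)(uintptr_t)ev;
	args.return_size = sizeof(*ev);

	return ioctl(fd, HL_IOCTL_INFO, &args);	/* 0 on success, -1 with errno on failure */
}
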

static int undefined_opcode_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
@@ -784,6 +784,42 @@ static int engine_status_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
return rc;
}

static int page_fault_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
struct hl_device *hdev = hpriv->hdev;
u32 max_size = args->return_size;
struct hl_page_fault_info *info = &hdev->captured_err_info.pgf_info.pgf;
void __user *out = (void __user *) (uintptr_t) args->return_pointer;

if ((!max_size) || (!out))
return -EINVAL;

return copy_to_user(out, info, min_t(size_t, max_size, sizeof(struct hl_page_fault_info)))
? -EFAULT : 0;
}

static int user_mappings_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
void __user *out = (void __user *) (uintptr_t) args->return_pointer;
u32 user_buf_size = args->return_size;
struct hl_device *hdev = hpriv->hdev;
struct page_fault_info *pgf_info;
u64 actual_size;

pgf_info = &hdev->captured_err_info.pgf_info;
args->array_size = pgf_info->num_of_user_mappings;

if (!out)
return -EINVAL;

actual_size = pgf_info->num_of_user_mappings * sizeof(struct hl_user_mapping);
if (user_buf_size < actual_size)
return -ENOMEM;

return copy_to_user(out, pgf_info->user_mappings, min_t(size_t, user_buf_size, actual_size))
? -EFAULT : 0;
}
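
user_mappings_info() reports the captured mapping count through args->array_size and returns -ENOMEM when the user buffer is too small, which suggests a two-step retrieval from userspace; since HL_IOCTL_INFO is a read/write ioctl, the updated args struct is copied back to the caller. A sketch under the same assumptions as above (hypothetical helper, not part of the diff):

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <misc/habanalabs.h>

static struct hl_user_mapping *read_user_mappings(int fd, __u32 *count)
{
	struct hl_user_mapping probe, *buf;
	struct hl_info_args args;

	memset(&args, 0, sizeof(args));
	args.op = HL_INFO_USER_MAPPINGS;
	args.return_pointer = (__u64)(uintptr_t)&probe;
	args.return_size = sizeof(probe);

	/* first call may fail with ENOMEM but still reports the needed count in array_size */
	if (ioctl(fd, HL_IOCTL_INFO, &args) && errno != ENOMEM)
		return NULL;

	if (!args.array_size)
		return NULL;

	buf = calloc(args.array_size, sizeof(*buf));
	if (!buf)
		return NULL;

	args.return_pointer = (__u64)(uintptr_t)buf;
	args.return_size = args.array_size * sizeof(*buf);
	if (ioctl(fd, HL_IOCTL_INFO, &args)) {
		free(buf);
		return NULL;
	}

	*count = args.array_size;
	return buf;
}
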

static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
struct device *dev)
{
@@ -843,6 +879,15 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
case HL_INFO_GET_EVENTS:
return events_info(hpriv, args);

case HL_INFO_PAGE_FAULT_EVENT:
return page_fault_info(hpriv, args);

case HL_INFO_USER_MAPPINGS:
return user_mappings_info(hpriv, args);

case HL_INFO_UNREGISTER_EVENTFD:
return eventfd_unregister(hpriv, args);

default:
break;
}
@@ -899,9 +944,6 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
case HL_INFO_REGISTER_EVENTFD:
return eventfd_register(hpriv, args);

case HL_INFO_UNREGISTER_EVENTFD:
return eventfd_unregister(hpriv, args);

case HL_INFO_ENGINE_STATUS:
return engine_status_info(hpriv, args);

@@ -1689,7 +1689,7 @@ static int hl_dmabuf_attach(struct dma_buf *dmabuf,
hl_dmabuf = dmabuf->priv;
hdev = hl_dmabuf->ctx->hdev;

rc = pci_p2pdma_distance_many(hdev->pdev, &attachment->dev, 1, true);
rc = pci_p2pdma_distance(hdev->pdev, attachment->dev, true);

if (rc < 0)
attachment->peer2peer = false;
@@ -2109,7 +2109,7 @@ static int hl_ts_alloc_buf(struct hl_mmap_mem_buf *buf, gfp_t gfp, void *args)

/* Allocate the internal kernel buffer */
size = num_elements * sizeof(struct hl_user_pending_interrupt);
p = vmalloc(size);
p = vzalloc(size);
if (!p)
goto free_user_buff;

@@ -2508,24 +2508,20 @@ static int va_range_init(struct hl_device *hdev, struct hl_va_range **va_ranges,

/*
* PAGE_SIZE alignment
* it is the callers responsibility to align the addresses if the
* it is the caller's responsibility to align the addresses if the
* page size is not a power of 2
*/

if (is_power_of_2(page_size)) {
if (start & (PAGE_SIZE - 1)) {
start &= PAGE_MASK;
start += PAGE_SIZE;
}
start = round_up(start, page_size);

/*
* The end of the range is inclusive, hence we need to align it
* to the end of the last full page in the range. For example if
* end = 0x3ff5 with page size 0x1000, we need to align it to
* 0x2fff. The remainig 0xff5 bytes do not form a full page.
* 0x2fff. The remaining 0xff5 bytes do not form a full page.
*/
if ((end + 1) & (PAGE_SIZE - 1))
end = ((end + 1) & PAGE_MASK) - 1;
end = round_down(end + 1, page_size) - 1;
}

if (start >= end) {
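
The rewritten block replaces the open-coded PAGE_MASK arithmetic with round_up()/round_down(); because end is inclusive, the last full page is found by rounding end + 1 down and subtracting one. A standalone sketch reproducing the example from the comment (end = 0x3ff5, page size 0x1000):

#include <assert.h>
#include <stdint.h>

#define ROUND_UP(x, sz)		((((x) + (sz) - 1) / (sz)) * (sz))
#define ROUND_DOWN(x, sz)	(((x) / (sz)) * (sz))

int main(void)
{
	uint64_t page_size = 0x1000;
	uint64_t start = 0x1234;
	uint64_t end = 0x3ff5;				/* inclusive end of the range */

	start = ROUND_UP(start, page_size);		/* 0x2000 */
	end = ROUND_DOWN(end + 1, page_size) - 1;	/* 0x2fff, last byte of the last full page */

	assert(start == 0x2000 && end == 0x2fff);
	return 0;
}
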

@@ -635,7 +635,7 @@ int hl_mmu_if_set_funcs(struct hl_device *hdev)
hl_mmu_v1_set_funcs(hdev, &hdev->mmu_func[MMU_DR_PGT]);
break;
case ASIC_GAUDI2:
case ASIC_GAUDI2_SEC:
case ASIC_GAUDI2B:
/* MMUs in Gaudi2 are always host resident */
hl_mmu_v2_hr_set_funcs(hdev, &hdev->mmu_func[MMU_HR_PGT]);
break;
@@ -699,7 +699,7 @@ int hl_mmu_invalidate_cache_range(struct hl_device *hdev, bool is_hard,

static void hl_mmu_prefetch_work_function(struct work_struct *work)
{
struct hl_prefetch_work *pfw = container_of(work, struct hl_prefetch_work, pf_work);
struct hl_prefetch_work *pfw = container_of(work, struct hl_prefetch_work, prefetch_work);
struct hl_ctx *ctx = pfw->ctx;
struct hl_device *hdev = ctx->hdev;

@@ -723,25 +723,25 @@ static void hl_mmu_prefetch_work_function(struct work_struct *work)

int hl_mmu_prefetch_cache_range(struct hl_ctx *ctx, u32 flags, u32 asid, u64 va, u64 size)
{
struct hl_prefetch_work *handle_pf_work;
struct hl_prefetch_work *handle_prefetch_work;

handle_pf_work = kmalloc(sizeof(*handle_pf_work), GFP_KERNEL);
if (!handle_pf_work)
handle_prefetch_work = kmalloc(sizeof(*handle_prefetch_work), GFP_KERNEL);
if (!handle_prefetch_work)
return -ENOMEM;

INIT_WORK(&handle_pf_work->pf_work, hl_mmu_prefetch_work_function);
handle_pf_work->ctx = ctx;
handle_pf_work->va = va;
handle_pf_work->size = size;
handle_pf_work->flags = flags;
handle_pf_work->asid = asid;
INIT_WORK(&handle_prefetch_work->prefetch_work, hl_mmu_prefetch_work_function);
handle_prefetch_work->ctx = ctx;
handle_prefetch_work->va = va;
handle_prefetch_work->size = size;
handle_prefetch_work->flags = flags;
handle_prefetch_work->asid = asid;

/*
* as actual prefetch is done in a WQ we must get the context (and put it
* at the end of the work function)
*/
hl_ctx_get(ctx);
queue_work(ctx->hdev->pf_wq, &handle_pf_work->pf_work);
queue_work(ctx->hdev->prefetch_wq, &handle_prefetch_work->prefetch_work);

return 0;
}
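
The comment above explains why the context reference is taken before queue_work(): the work item may run after the caller returns, so the reference taken here must be dropped by the work function. A reduced sketch of that get-before-queue / put-in-worker pairing (illustrative only; the put/free placement is the expected counterpart implied by the comment, not quoted from the driver):

static void prefetch_worker_sketch(struct work_struct *work)
{
	struct hl_prefetch_work *pfw = container_of(work, struct hl_prefetch_work, prefetch_work);

	/* ... perform the MMU prefetch for pfw->ctx here ... */

	hl_ctx_put(pfw->ctx);	/* balances the hl_ctx_get() taken before queue_work() */
	kfree(pfw);
}
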

@@ -248,8 +248,8 @@ static ssize_t device_type_show(struct device *dev,
case ASIC_GAUDI2:
str = "GAUDI2";
break;
case ASIC_GAUDI2_SEC:
str = "GAUDI2 SEC";
case ASIC_GAUDI2B:
str = "GAUDI2B";
break;
default:
dev_err(hdev->dev, "Unrecognized ASIC type %d\n",

@@ -6505,8 +6505,8 @@ static void gaudi_get_event_desc(u16 event_type, char *desc, size_t size)
}

static const char *gaudi_get_razwi_initiator_dma_name(struct hl_device *hdev, u32 x_y,
bool is_write, s32 *engine_id_1,
s32 *engine_id_2)
bool is_write, u16 *engine_id_1,
u16 *engine_id_2)
{
u32 dma_id[2], dma_offset, err_cause[2], mask, i;

@@ -6603,7 +6603,7 @@ static const char *gaudi_get_razwi_initiator_dma_name(struct hl_device *hdev, u3
}

static const char *gaudi_get_razwi_initiator_name(struct hl_device *hdev, bool is_write,
u32 *engine_id_1, u32 *engine_id_2)
u16 *engine_id_1, u16 *engine_id_2)
{
u32 val, x_y, axi_id;

@@ -6719,8 +6719,8 @@ static const char *gaudi_get_razwi_initiator_name(struct hl_device *hdev, bool i
return "unknown initiator";
}

static void gaudi_print_and_get_razwi_info(struct hl_device *hdev, u32 *engine_id_1,
u32 *engine_id_2)
static void gaudi_print_and_get_razwi_info(struct hl_device *hdev, u16 *engine_id_1,
u16 *engine_id_2, bool *is_read, bool *is_write)
{

if (RREG32(mmMMU_UP_RAZWI_WRITE_VLD)) {
@@ -6728,6 +6728,7 @@ static void gaudi_print_and_get_razwi_info(struct hl_device *hdev, u32 *engine_i
"RAZWI event caused by illegal write of %s\n",
gaudi_get_razwi_initiator_name(hdev, true, engine_id_1, engine_id_2));
WREG32(mmMMU_UP_RAZWI_WRITE_VLD, 0);
*is_write = true;
}

if (RREG32(mmMMU_UP_RAZWI_READ_VLD)) {
@@ -6735,10 +6736,11 @@ static void gaudi_print_and_get_razwi_info(struct hl_device *hdev, u32 *engine_i
"RAZWI event caused by illegal read of %s\n",
gaudi_get_razwi_initiator_name(hdev, false, engine_id_1, engine_id_2));
WREG32(mmMMU_UP_RAZWI_READ_VLD, 0);
*is_read = true;
}
}

static void gaudi_print_and_get_mmu_error_info(struct hl_device *hdev, u64 *addr, u8 *type)
static void gaudi_print_and_get_mmu_error_info(struct hl_device *hdev, u64 *addr, u64 *event_mask)
{
struct gaudi_device *gaudi = hdev->asic_specific;
u32 val;
@@ -6753,7 +6755,7 @@ static void gaudi_print_and_get_mmu_error_info(struct hl_device *hdev, u64 *addr
*addr |= RREG32(mmMMU_UP_PAGE_ERROR_CAPTURE_VA);

dev_err_ratelimited(hdev->dev, "MMU page fault on va 0x%llx\n", *addr);
*type = HL_RAZWI_PAGE_FAULT;
hl_handle_page_fault(hdev, *addr, 0, true, event_mask);

WREG32(mmMMU_UP_PAGE_ERROR_CAPTURE, 0);
}
@@ -6765,7 +6767,6 @@ static void gaudi_print_and_get_mmu_error_info(struct hl_device *hdev, u64 *addr
*addr |= RREG32(mmMMU_UP_ACCESS_ERROR_CAPTURE_VA);

dev_err_ratelimited(hdev->dev, "MMU access error on va 0x%llx\n", *addr);
*type = HL_RAZWI_MMU_ACCESS_ERROR;

WREG32(mmMMU_UP_ACCESS_ERROR_CAPTURE, 0);
}
@@ -7300,48 +7301,44 @@ static void gaudi_handle_qman_err(struct hl_device *hdev, u16 event_type, u64 *e
}

static void gaudi_print_irq_info(struct hl_device *hdev, u16 event_type,
bool razwi)
bool razwi, u64 *event_mask)
{
u32 engine_id_1, engine_id_2;
bool is_read = false, is_write = false;
u16 engine_id[2], num_of_razwi_eng = 0;
char desc[64] = "";
u64 razwi_addr = 0;
u8 razwi_type;
int rc;
u8 razwi_flags = 0;

/*
* Init engine id by default as not valid and only if razwi initiated from engine with
* engine id it will get valid value.
* Init razwi type to default, will be changed only if razwi caused by page fault of
* MMU access error
*/
engine_id_1 = U16_MAX;
engine_id_2 = U16_MAX;
razwi_type = U8_MAX;
engine_id[0] = HL_RAZWI_NA_ENG_ID;
engine_id[1] = HL_RAZWI_NA_ENG_ID;

gaudi_get_event_desc(event_type, desc, sizeof(desc));
dev_err_ratelimited(hdev->dev, "Received H/W interrupt %d [\"%s\"]\n",
event_type, desc);

if (razwi) {
gaudi_print_and_get_razwi_info(hdev, &engine_id_1, &engine_id_2);
gaudi_print_and_get_mmu_error_info(hdev, &razwi_addr, &razwi_type);
gaudi_print_and_get_razwi_info(hdev, &engine_id[0], &engine_id[1], &is_read,
&is_write);
gaudi_print_and_get_mmu_error_info(hdev, &razwi_addr, event_mask);

/* In case it's the first razwi, save its parameters*/
rc = atomic_cmpxchg(&hdev->captured_err_info.razwi.write_enable, 1, 0);
if (rc) {
hdev->captured_err_info.razwi.timestamp = ktime_get();
hdev->captured_err_info.razwi.addr = razwi_addr;
hdev->captured_err_info.razwi.engine_id_1 = engine_id_1;
hdev->captured_err_info.razwi.engine_id_2 = engine_id_2;
/*
* If first engine id holds non valid value the razwi initiator
* does not have engine id
*/
hdev->captured_err_info.razwi.non_engine_initiator =
(engine_id_1 == U16_MAX);
hdev->captured_err_info.razwi.type = razwi_type;
if (is_read)
razwi_flags |= HL_RAZWI_READ;
if (is_write)
razwi_flags |= HL_RAZWI_WRITE;

if (engine_id[0] != HL_RAZWI_NA_ENG_ID) {
if (engine_id[1] != HL_RAZWI_NA_ENG_ID)
num_of_razwi_eng = 2;
else
num_of_razwi_eng = 1;
}

hl_handle_razwi(hdev, razwi_addr, engine_id, num_of_razwi_eng, razwi_flags,
event_mask);
}
}

@@ -7350,8 +7347,8 @@ static void gaudi_print_out_of_sync_info(struct hl_device *hdev,
{
struct hl_hw_queue *q = &hdev->kernel_queues[GAUDI_QUEUE_ID_CPU_PQ];

dev_err(hdev->dev, "Out of sync with FW, FW: pi=%u, ci=%u, LKD: pi=%u, ci=%u\n",
sync_err->pi, sync_err->ci, q->pi, atomic_read(&q->ci));
dev_err(hdev->dev, "Out of sync with FW, FW: pi=%u, ci=%u, LKD: pi=%u, ci=%d\n",
le32_to_cpu(sync_err->pi), le32_to_cpu(sync_err->ci), q->pi, atomic_read(&q->ci));
}

static void gaudi_print_fw_alive_info(struct hl_device *hdev,
@@ -7359,9 +7356,10 @@ static void gaudi_print_fw_alive_info(struct hl_device *hdev,
{
dev_err(hdev->dev,
"FW alive report: severity=%s, process_id=%u, thread_id=%u, uptime=%llu seconds\n",
(fw_alive->severity == FW_ALIVE_SEVERITY_MINOR) ?
"Minor" : "Critical", fw_alive->process_id,
fw_alive->thread_id, fw_alive->uptime_seconds);
(fw_alive->severity == FW_ALIVE_SEVERITY_MINOR) ? "Minor" : "Critical",
le32_to_cpu(fw_alive->process_id),
le32_to_cpu(fw_alive->thread_id),
le64_to_cpu(fw_alive->uptime_seconds));
}

static void gaudi_print_nic_axi_irq_info(struct hl_device *hdev, u16 event_type,
@@ -7679,7 +7677,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
case GAUDI_EVENT_HBM_0_DERR ... GAUDI_EVENT_HBM_3_DERR:
case GAUDI_EVENT_MMU_DERR:
case GAUDI_EVENT_NIC0_CS_DBG_DERR ... GAUDI_EVENT_NIC4_CS_DBG_DERR:
gaudi_print_irq_info(hdev, event_type, true);
gaudi_print_irq_info(hdev, event_type, true, &event_mask);
gaudi_handle_ecc_event(hdev, event_type, &eq_entry->ecc_data);
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
fw_fatal_err_flag = HL_DRV_RESET_FW_FATAL_ERR;
@@ -7689,7 +7687,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
case GAUDI_EVENT_AXI_ECC:
case GAUDI_EVENT_L2_RAM_ECC:
case GAUDI_EVENT_PLL0 ... GAUDI_EVENT_PLL17:
gaudi_print_irq_info(hdev, event_type, false);
gaudi_print_irq_info(hdev, event_type, false, &event_mask);
fw_fatal_err_flag = HL_DRV_RESET_FW_FATAL_ERR;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
goto reset_device;
@@ -7698,7 +7696,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
case GAUDI_EVENT_HBM1_SPI_0:
case GAUDI_EVENT_HBM2_SPI_0:
case GAUDI_EVENT_HBM3_SPI_0:
gaudi_print_irq_info(hdev, event_type, false);
gaudi_print_irq_info(hdev, event_type, false, &event_mask);
gaudi_hbm_read_interrupts(hdev,
gaudi_hbm_event_to_dev(event_type),
&eq_entry->hbm_ecc_data);
@@ -7710,7 +7708,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
case GAUDI_EVENT_HBM1_SPI_1:
case GAUDI_EVENT_HBM2_SPI_1:
case GAUDI_EVENT_HBM3_SPI_1:
gaudi_print_irq_info(hdev, event_type, false);
gaudi_print_irq_info(hdev, event_type, false, &event_mask);
gaudi_hbm_read_interrupts(hdev,
gaudi_hbm_event_to_dev(event_type),
&eq_entry->hbm_ecc_data);
@@ -7732,7 +7730,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
* if the event is a TPC Assertion or a "real" TPC DEC.
*/
event_mask |= HL_NOTIFIER_EVENT_TPC_ASSERT;
gaudi_print_irq_info(hdev, event_type, true);
gaudi_print_irq_info(hdev, event_type, true, &event_mask);
reset_required = gaudi_tpc_read_interrupts(hdev,
tpc_dec_event_to_tpc_id(event_type),
"AXI_SLV_DEC_Error");
@@ -7757,7 +7755,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
case GAUDI_EVENT_TPC5_KRN_ERR:
case GAUDI_EVENT_TPC6_KRN_ERR:
case GAUDI_EVENT_TPC7_KRN_ERR:
gaudi_print_irq_info(hdev, event_type, true);
gaudi_print_irq_info(hdev, event_type, true, &event_mask);
reset_required = gaudi_tpc_read_interrupts(hdev,
tpc_krn_event_to_tpc_id(event_type),
"KRN_ERR");
@@ -7796,7 +7794,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
case GAUDI_EVENT_HBM_0_SERR ... GAUDI_EVENT_HBM_3_SERR:
fallthrough;
case GAUDI_EVENT_MMU_SERR:
gaudi_print_irq_info(hdev, event_type, true);
gaudi_print_irq_info(hdev, event_type, true, &event_mask);
gaudi_handle_ecc_event(hdev, event_type, &eq_entry->ecc_data);
hl_fw_unmask_irq(hdev, event_type);
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
@@ -7806,14 +7804,14 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
case GAUDI_EVENT_CPU_AXI_SPLITTER:
case GAUDI_EVENT_PSOC_AXI_DEC:
case GAUDI_EVENT_PSOC_PRSTN_FALL:
gaudi_print_irq_info(hdev, event_type, true);
gaudi_print_irq_info(hdev, event_type, true, &event_mask);
hl_fw_unmask_irq(hdev, event_type);
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;

case GAUDI_EVENT_MMU_PAGE_FAULT:
case GAUDI_EVENT_MMU_WR_PERM:
gaudi_print_irq_info(hdev, event_type, true);
gaudi_print_irq_info(hdev, event_type, true, &event_mask);
hl_fw_unmask_irq(hdev, event_type);
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
@@ -7842,14 +7840,14 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
case GAUDI_EVENT_NIC4_QM1:
case GAUDI_EVENT_DMA0_CORE ... GAUDI_EVENT_DMA7_CORE:
case GAUDI_EVENT_TPC0_QM ... GAUDI_EVENT_TPC7_QM:
gaudi_print_irq_info(hdev, event_type, true);
gaudi_print_irq_info(hdev, event_type, true, &event_mask);
gaudi_handle_qman_err(hdev, event_type, &event_mask);
hl_fw_unmask_irq(hdev, event_type);
event_mask |= (HL_NOTIFIER_EVENT_USER_ENGINE_ERR | HL_NOTIFIER_EVENT_DEVICE_RESET);
break;

case GAUDI_EVENT_RAZWI_OR_ADC_SW:
gaudi_print_irq_info(hdev, event_type, true);
gaudi_print_irq_info(hdev, event_type, true, &event_mask);
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
goto reset_device;

@@ -7862,7 +7860,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
case GAUDI_EVENT_TPC6_BMON_SPMU:
case GAUDI_EVENT_TPC7_BMON_SPMU:
case GAUDI_EVENT_DMA_BM_CH0 ... GAUDI_EVENT_DMA_BM_CH7:
gaudi_print_irq_info(hdev, event_type, false);
gaudi_print_irq_info(hdev, event_type, false, &event_mask);
hl_fw_unmask_irq(hdev, event_type);
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
@@ -7874,7 +7872,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
break;

case GAUDI_EVENT_DMA_IF_SEI_0 ... GAUDI_EVENT_DMA_IF_SEI_3:
gaudi_print_irq_info(hdev, event_type, false);
gaudi_print_irq_info(hdev, event_type, false, &event_mask);
gaudi_print_sm_sei_info(hdev, event_type,
&eq_entry->sm_sei_data);
rc = hl_state_dump(hdev);
@@ -7903,18 +7901,18 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
break;

case GAUDI_EVENT_DEV_RESET_REQ:
gaudi_print_irq_info(hdev, event_type, false);
gaudi_print_irq_info(hdev, event_type, false, &event_mask);
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
goto reset_device;

case GAUDI_EVENT_PKT_QUEUE_OUT_SYNC:
gaudi_print_irq_info(hdev, event_type, false);
gaudi_print_irq_info(hdev, event_type, false, &event_mask);
gaudi_print_out_of_sync_info(hdev, &eq_entry->pkt_sync_err);
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
goto reset_device;

case GAUDI_EVENT_FW_ALIVE_S:
gaudi_print_irq_info(hdev, event_type, false);
gaudi_print_irq_info(hdev, event_type, false, &event_mask);
gaudi_print_fw_alive_info(hdev, &eq_entry->fw_alive);
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
goto reset_device;
@@ -7946,14 +7944,14 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
reset_required = false;
}

/* despite reset doesn't execute. a notification on
* occurred event needs to be sent here
*/
hl_notifier_event_send_all(hdev, event_mask);
if (reset_required)
hl_device_reset(hdev, flags);
else
if (reset_required) {
hl_device_cond_reset(hdev, flags, event_mask);
} else {
hl_fw_unmask_irq(hdev, event_type);
/* Notification on occurred event needs to be sent although reset is not executed */
if (event_mask)
hl_notifier_event_send_all(hdev, event_mask);
}
}

static void *gaudi_get_events_stat(struct hl_device *hdev, bool aggregate, u32 *size)

File diff suppressed because it is too large
@@ -23,8 +23,6 @@

#define GAUDI2_CPU_TIMEOUT_USEC 30000000 /* 30s */

#define GAUDI2_FPGA_CPU_TIMEOUT 100000000 /* 100s */

#define NUMBER_OF_PDMA_QUEUES 2
#define NUMBER_OF_EDMA_QUEUES 8
#define NUMBER_OF_MME_QUEUES 4

@@ -1764,6 +1764,7 @@ static const struct range gaudi2_pb_nic0_qm_arc_aux0_unsecured_regs[] = {
{mmNIC0_QM_ARC_AUX0_CLUSTER_NUM, mmNIC0_QM_ARC_AUX0_WAKE_UP_EVENT},
{mmNIC0_QM_ARC_AUX0_ARC_RST_REQ, mmNIC0_QM_ARC_AUX0_CID_OFFSET_7},
{mmNIC0_QM_ARC_AUX0_SCRATCHPAD_0, mmNIC0_QM_ARC_AUX0_INFLIGHT_LBU_RD_CNT},
{mmNIC0_QM_ARC_AUX0_CBU_EARLY_BRESP_EN, mmNIC0_QM_ARC_AUX0_CBU_EARLY_BRESP_EN},
{mmNIC0_QM_ARC_AUX0_LBU_EARLY_BRESP_EN, mmNIC0_QM_ARC_AUX0_LBU_EARLY_BRESP_EN},
{mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_BASE_ADDR_0, mmNIC0_QM_ARC_AUX0_DCCM_QUEUE_ALERT_MSG},
{mmNIC0_QM_ARC_AUX0_DCCM_Q_PUSH_FIFO_CNT, mmNIC0_QM_ARC_AUX0_QMAN_ARC_CQ_SHADOW_CI},

@@ -4475,8 +4475,8 @@ static void goya_print_out_of_sync_info(struct hl_device *hdev,
{
struct hl_hw_queue *q = &hdev->kernel_queues[GOYA_QUEUE_ID_CPU_PQ];

dev_err(hdev->dev, "Out of sync with FW, FW: pi=%u, ci=%u, LKD: pi=%u, ci=%u\n",
sync_err->pi, sync_err->ci, q->pi, atomic_read(&q->ci));
dev_err(hdev->dev, "Out of sync with FW, FW: pi=%u, ci=%u, LKD: pi=%u, ci=%d\n",
le32_to_cpu(sync_err->pi), le32_to_cpu(sync_err->ci), q->pi, atomic_read(&q->ci));
}

static void goya_print_irq_info(struct hl_device *hdev, u16 event_type,

@@ -957,6 +957,7 @@ enum gaudi2_async_event_id {
GAUDI2_EVENT_CPU11_STATUS_NIC11_ENG0 = 1317,
GAUDI2_EVENT_CPU11_STATUS_NIC11_ENG1 = 1318,
GAUDI2_EVENT_ARC_DCCM_FULL = 1319,
GAUDI2_EVENT_CPU_FP32_NOT_SUPPORTED = 1320,
GAUDI2_EVENT_SIZE,
};

@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright 2018-2021 HabanaLabs, Ltd.
* Copyright 2018-2022 HabanaLabs, Ltd.
* All Rights Reserved.
*
*/
@@ -2663,6 +2663,8 @@ static struct gaudi2_async_events_ids_map gaudi2_irq_map_table[] = {
.msg = 1, .reset = 0, .name = "STATUS_NIC11_ENG1" },
{ .fc_id = 1319, .cpu_id = 625, .valid = 1,
.msg = 1, .reset = 0, .name = "ARC_DCCM_FULL" },
{ .fc_id = 1320, .cpu_id = 626, .valid = 1,
.msg = 1, .reset = 1, .name = "FP32_NOT_SUPPORTED" },
};

#endif /* __GAUDI2_ASYNC_IDS_MAP_EVENTS_EXT_H_ */

@@ -20,4 +20,11 @@
#define PCI_CONFIG_ELBI_STS_MASK (PCI_CONFIG_ELBI_STS_ERR | \
PCI_CONFIG_ELBI_STS_DONE)

enum hl_revision_id {
/* PCI revision ID 0 is not legal */
REV_ID_INVALID = 0x00,
REV_ID_A = 0x01,
REV_ID_B = 0x02,
};

#endif /* INCLUDE_PCI_GENERAL_H_ */

@@ -597,6 +597,10 @@ enum gaudi2_engine_id {
GAUDI2_ENGINE_ID_NIC10_1,
GAUDI2_ENGINE_ID_NIC11_0,
GAUDI2_ENGINE_ID_NIC11_1,
GAUDI2_ENGINE_ID_PCIE,
GAUDI2_ENGINE_ID_PSOC,
GAUDI2_ENGINE_ID_ARC_FARM,
GAUDI2_ENGINE_ID_KDMA,
GAUDI2_ENGINE_ID_SIZE
};

@@ -717,6 +721,8 @@ enum hl_server_type {
* HL_NOTIFIER_EVENT_DEVICE_UNAVAILABLE - Indicates device is unavailable
* HL_NOTIFIER_EVENT_USER_ENGINE_ERR - Indicates device engine in error state
* HL_NOTIFIER_EVENT_GENERAL_HW_ERR - Indicates device HW error
* HL_NOTIFIER_EVENT_RAZWI - Indicates razwi happened
* HL_NOTIFIER_EVENT_PAGE_FAULT - Indicates page fault happened
*/
#define HL_NOTIFIER_EVENT_TPC_ASSERT (1ULL << 0)
#define HL_NOTIFIER_EVENT_UNDEFINED_OPCODE (1ULL << 1)
@@ -725,6 +731,8 @@ enum hl_server_type {
#define HL_NOTIFIER_EVENT_DEVICE_UNAVAILABLE (1ULL << 4)
#define HL_NOTIFIER_EVENT_USER_ENGINE_ERR (1ULL << 5)
#define HL_NOTIFIER_EVENT_GENERAL_HW_ERR (1ULL << 6)
#define HL_NOTIFIER_EVENT_RAZWI (1ULL << 7)
#define HL_NOTIFIER_EVENT_PAGE_FAULT (1ULL << 8)

/* Opcode for management ioctl
*
@@ -778,6 +786,9 @@ enum hl_server_type {
* HL_INFO_UNREGISTER_EVENTFD - Unregister eventfd
* HL_INFO_GET_EVENTS - Retrieve the last occurred events
* HL_INFO_UNDEFINED_OPCODE_EVENT - Retrieve last undefined opcode error information.
* HL_INFO_ENGINE_STATUS - Retrieve the status of all the h/w engines in the asic.
* HL_INFO_PAGE_FAULT_EVENT - Retrieve parameters of captured page fault.
* HL_INFO_USER_MAPPINGS - Retrieve user mappings, captured after page fault event.
*/
#define HL_INFO_HW_IP_INFO 0
#define HL_INFO_HW_EVENTS 1
@@ -809,6 +820,8 @@ enum hl_server_type {
#define HL_INFO_GET_EVENTS 30
#define HL_INFO_UNDEFINED_OPCODE_EVENT 31
#define HL_INFO_ENGINE_STATUS 32
#define HL_INFO_PAGE_FAULT_EVENT 33
#define HL_INFO_USER_MAPPINGS 34

#define HL_INFO_VERSION_MAX_LEN 128
#define HL_INFO_CARD_NAME_MAX_LEN 16
@@ -859,6 +872,7 @@ enum hl_server_type {
* @number_of_user_interrupts: The number of interrupts that are available to the userspace
* application to use. Relevant for Gaudi2 and later.
* @device_mem_alloc_default_page_size: default page size used in device memory allocation.
* @revision_id: PCI revision ID of the ASIC.
*/
struct hl_info_hw_ip_info {
__u64 sram_base_address;
@@ -889,6 +903,12 @@ struct hl_info_hw_ip_info {
__u16 pad2;
__u64 reserved4;
__u64 device_mem_alloc_default_page_size;
__u64 reserved5;
__u64 reserved6;
__u32 reserved7;
__u8 reserved8;
__u8 revision_id;
__u8 pad[2];
};

struct hl_info_dram_usage {
@@ -896,7 +916,7 @@ struct hl_info_dram_usage {
__u64 ctx_dram_mem;
};

#define HL_BUSY_ENGINES_MASK_EXT_SIZE 2
#define HL_BUSY_ENGINES_MASK_EXT_SIZE 4

struct hl_info_hw_idle {
__u32 is_idle;
@@ -1071,31 +1091,44 @@ struct hl_info_cs_timeout_event {
__u64 seq;
};

#define HL_RAZWI_PAGE_FAULT 0
#define HL_RAZWI_MMU_ACCESS_ERROR 1
#define HL_RAZWI_NA_ENG_ID U16_MAX
#define HL_RAZWI_MAX_NUM_OF_ENGINES_PER_RTR 128
#define HL_RAZWI_READ BIT(0)
#define HL_RAZWI_WRITE BIT(1)
#define HL_RAZWI_LBW BIT(2)
#define HL_RAZWI_HBW BIT(3)
#define HL_RAZWI_RR BIT(4)
#define HL_RAZWI_ADDR_DEC BIT(5)

/**
* struct hl_info_razwi_event - razwi information.
* @timestamp: timestamp of razwi.
* @addr: address which accessing it caused razwi.
* @engine_id_1: engine id of the razwi initiator, if it was initiated by engine that does not
* have engine id it will be set to U16_MAX.
* @engine_id_2: second engine id of razwi initiator. Might happen that razwi have 2 possible
* engines which one them caused the razwi. In that case, it will contain the
* second possible engine id, otherwise it will be set to U16_MAX.
* @no_engine_id: if razwi initiator does not have engine id, this field will be set to 1,
* otherwise 0.
* @error_type: cause of razwi, page fault or access error, otherwise it will be set to U8_MAX.
* @pad: padding to 64 bit.
* @engine_id: engine id of the razwi initiator, if it was initiated by engine that does not
* have engine id it will be set to HL_RAZWI_NA_ENG_ID. If there are several possible
* engines which caused the razwi, it will hold all of them.
* @num_of_possible_engines: contains number of possible engine ids. In some asics, razwi indication
* might be common for several engines and there is no way to get the
* exact engine. In this way, engine_id array will be filled with all
* possible engines caused this razwi. Also, there might be possibility
* in gaudi, where we don't indication on specific engine, in that case
* the value of this parameter will be zero.
* @flags: bitmask for additional data: HL_RAZWI_READ - razwi caused by read operation
* HL_RAZWI_WRITE - razwi caused by write operation
* HL_RAZWI_LBW - razwi caused by lbw fabric transaction
* HL_RAZWI_HBW - razwi caused by hbw fabric transaction
* HL_RAZWI_RR - razwi caused by range register
* HL_RAZWI_ADDR_DEC - razwi caused by address decode error
* Note: this data is not supported by all asics, in that case the relevant bits will not
* be set.
*/
struct hl_info_razwi_event {
__s64 timestamp;
__u64 addr;
__u16 engine_id_1;
__u16 engine_id_2;
__u8 no_engine_id;
__u8 error_type;
__u8 pad[2];
__u16 engine_id[HL_RAZWI_MAX_NUM_OF_ENGINES_PER_RTR];
__u16 num_of_possible_engines;
__u8 flags;
__u8 pad[5];
};
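
The reworked event carries a variable number of possible initiator engines plus a flags bitmask instead of the old fixed pair of engine ids. A short userspace sketch decoding it (assumes the event was fetched as shown earlier; illustrative only):

#include <stdio.h>
#include <misc/habanalabs.h>

static void print_razwi(const struct hl_info_razwi_event *ev)
{
	__u16 i;

	printf("razwi at 0x%llx%s%s\n", (unsigned long long)ev->addr,
	       (ev->flags & HL_RAZWI_READ) ? " [read]" : "",
	       (ev->flags & HL_RAZWI_WRITE) ? " [write]" : "");

	if (!ev->num_of_possible_engines) {
		printf("  no engine-level indication available on this asic\n");
		return;
	}

	for (i = 0; i < ev->num_of_possible_engines; i++)
		printf("  possible initiator engine id: %u\n", ev->engine_id[i]);
}
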

#define MAX_QMAN_STREAMS_INFO 4
@@ -1174,6 +1207,29 @@ struct hl_info_sec_attest {
__u8 pad0[2];
};

/**
* struct hl_page_fault_info - page fault information.
* @timestamp: timestamp of page fault.
* @addr: address which accessing it caused page fault.
* @engine_id: engine id which caused the page fault, supported only in gaudi3.
*/
struct hl_page_fault_info {
__s64 timestamp;
__u64 addr;
__u16 engine_id;
__u8 pad[6];
};

/**
* struct hl_user_mapping - user mapping information.
* @dev_va: device virtual address.
* @size: virtual address mapping size.
*/
struct hl_user_mapping {
__u64 dev_va;
__u64 size;
};

enum gaudi_dcores {
HL_GAUDI_WS_DCORE,
HL_GAUDI_WN_DCORE,
@@ -1200,6 +1256,8 @@ enum gaudi_dcores {
* needed, hence updating this variable so user will know the exact amount
* of bytes copied by the kernel to the buffer.
* @sec_attest_nonce: Nonce number used for attestation report.
* @array_size: Number of array members copied to user buffer.
* Relevant for HL_INFO_USER_MAPPINGS info ioctl.
* @pad: Padding to 64 bit.
*/
struct hl_info_args {
@@ -1215,6 +1273,7 @@ struct hl_info_args {
__u32 eventfd;
__u32 user_buffer_actual_size;
__u32 sec_attest_nonce;
__u32 array_size;
};

__u32 pad;