Merge branch 'drm-next-4.16' of git://people.freedesktop.org/~agd5f/linux into drm-next
A few more fixes for 4.16, nothing major. This is on top of the pull request from last week. The most notable change here is a fix to the link order for the GPU scheduler, which is now separate from amdgpu, to fix crashes when the modules are built into the kernel rather than built as loadable modules.

* 'drm-next-4.16' of git://people.freedesktop.org/~agd5f/linux:
  drm: fix gpu scheduler link order
  drm/amd/display: Demote error print to debug print when ATOM impl missing
  drm/amdgpu: Avoid leaking PM domain on driver unbind (v2)
  drm/amd/amdgpu: Add Polaris version check
  drm/amdgpu: Reenable manual GPU reset from sysfs
  drm/amdgpu: disable MMHUB power gating on raven
  drm/ttm: Don't unreserve swapped BOs that were previously reserved
  drm/ttm: Don't add swapped BOs to swap-LRU list
  drm/amdgpu: only check for ECC on Vega10
  drm/amd/powerplay: Fix smu_table_entry.handle type
  drm/ttm: add VADDR_FLAG_UPDATED_COUNT to correctly update dma_page global count
  drm/radeon: fill in rb backend map on evergreen/ni.
  drm/amdgpu/gfx9: fix ngg enablement to clear gds reserved memory (v2)
  drm/ttm: only free pages rather than update global memory count together
  drm/amdgpu: fix CPU based VM updates
  drm/amdgpu: fix typo in amdgpu_vce_validate_bo
  drm/amdgpu: fix amdgpu_vm_pasid_fault_credit
  drm/ttm: check the return value of register_shrinker
  drm/radeon: fix sparse warning: Should it be static?
commit 22bc72c807
@@ -50,6 +50,7 @@ obj-$(CONFIG_DRM_MIPI_DSI) += drm_mipi_dsi.o
 obj-$(CONFIG_DRM_PANEL_ORIENTATION_QUIRKS) += drm_panel_orientation_quirks.o
 obj-$(CONFIG_DRM_ARM) += arm/
 obj-$(CONFIG_DRM_TTM) += ttm/
+obj-$(CONFIG_DRM_SCHED) += scheduler/
 obj-$(CONFIG_DRM_TDFX) += tdfx/
 obj-$(CONFIG_DRM_R128) += r128/
 obj-y += amd/lib/
@@ -102,4 +103,3 @@ obj-$(CONFIG_DRM_MXSFB) += mxsfb/
 obj-$(CONFIG_DRM_TINYDRM) += tinydrm/
 obj-$(CONFIG_DRM_PL111) += pl111/
 obj-$(CONFIG_DRM_TVE200) += tve200/
-obj-$(CONFIG_DRM_SCHED) += scheduler/
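For reference: when CONFIG_DRM_SCHED and its users are built in (=y), there are no modules to load, and each object's module_init() runs in Makefile link order. The sketch below, with hypothetical file names, illustrates the ordering rule the hunks above rely on; it is not code from this patch.

/* provider.c -- linked earlier in the Makefile */
#include <linux/init.h>
#include <linux/module.h>

static int __init sched_provider_init(void)
{
        pr_info("scheduler-like provider ready\n");
        return 0;       /* state that consumers rely on is now valid */
}
module_init(sched_provider_init);

/* consumer.c -- linked later, so with =y this init runs second */
static int __init gpu_consumer_init(void)
{
        /* safe only because the provider was linked, and so ran, first */
        return 0;
}
module_init(gpu_consumer_init);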
@@ -1874,8 +1874,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	 * ignore it */
 	vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
 
-	if (amdgpu_runtime_pm == 1)
-		runtime = true;
 	if (amdgpu_device_is_px(ddev))
 		runtime = true;
 	if (!pci_is_thunderbolt_attached(adev->pdev))
@@ -2619,7 +2617,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 	uint64_t reset_flags = 0;
 	int i, r, resched;
 
-	if (!amdgpu_device_ip_check_soft_reset(adev)) {
+	if (!force && !amdgpu_device_ip_check_soft_reset(adev)) {
 		DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
 		return 0;
 	}
@@ -585,8 +585,8 @@ static int amdgpu_vce_validate_bo(struct amdgpu_cs_parser *p, uint32_t ib_idx,
 
 	for (i = 0; i < bo->placement.num_placement; ++i) {
 		bo->placements[i].fpfn = max(bo->placements[i].fpfn, fpfn);
-		bo->placements[i].lpfn = bo->placements[i].fpfn ?
-			min(bo->placements[i].fpfn, lpfn) : lpfn;
+		bo->placements[i].lpfn = bo->placements[i].lpfn ?
+			min(bo->placements[i].lpfn, lpfn) : lpfn;
 	}
 	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 }
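The typo fixed above clamped the upper bound against fpfn, the lower bound, so lpfn could end up below fpfn and yield an empty placement range. A minimal userspace sketch of the intended clamp, with hypothetical names:

#include <stdint.h>

/* Hypothetical helper, not the driver's code: restrict an existing
 * [fpfn, lpfn] page range to the window a command requires.  A zero
 * upper bound means "unrestricted". */
static void clamp_placement(uint32_t *fpfn, uint32_t *lpfn,
                            uint32_t min_pfn, uint32_t max_pfn)
{
        if (*fpfn < min_pfn)
                *fpfn = min_pfn;
        /* the typo clamped against *fpfn here instead of *lpfn */
        *lpfn = *lpfn ? (*lpfn < max_pfn ? *lpfn : max_pfn) : max_pfn;
}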
@@ -970,12 +970,16 @@ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
 	amdgpu_gart_get_vm_pde(p->adev, AMDGPU_VM_PDB0,
 			       &dst, &flags);
 
-	if (parent->base.bo->shadow) {
-		pd_addr = amdgpu_bo_gpu_offset(parent->base.bo->shadow);
-		pde = pd_addr + (entry - parent->entries) * 8;
-		p->func(p, pde, dst, 1, 0, flags);
+	if (p->func == amdgpu_vm_cpu_set_ptes) {
+		pd_addr = (unsigned long)amdgpu_bo_kptr(parent->base.bo);
+	} else {
+		if (parent->base.bo->shadow) {
+			pd_addr = amdgpu_bo_gpu_offset(parent->base.bo->shadow);
+			pde = pd_addr + (entry - parent->entries) * 8;
+			p->func(p, pde, dst, 1, 0, flags);
+		}
+		pd_addr = amdgpu_bo_gpu_offset(parent->base.bo);
 	}
-	pd_addr = amdgpu_bo_gpu_offset(parent->base.bo);
 	pde = pd_addr + (entry - parent->entries) * 8;
 	p->func(p, pde, dst, 1, 0, flags);
 }
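The CPU update path needs a kernel virtual address from amdgpu_bo_kptr(), while the GPU path needs a GPU offset; the old code always used GPU offsets, which broke CPU-based updates. A generic sketch of the dispatch pattern, with hypothetical types rather than driver code:

/* One callback signature is shared by CPU and GPU backends, so the
 * caller must hand it an address in the matching address space. */
typedef void (*pte_update_fn)(void *ctx, unsigned long pd_addr);

static void update_pde(void *ctx, pte_update_fn func,
                       pte_update_fn cpu_backend,
                       unsigned long kernel_va, unsigned long gpu_offset)
{
        /* a CPU backend dereferences pd_addr; a GPU backend DMAs to it */
        func(ctx, func == cpu_backend ? kernel_va : gpu_offset);
}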
@@ -2478,17 +2482,21 @@ bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
 
 	spin_lock(&adev->vm_manager.pasid_lock);
 	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
-	spin_unlock(&adev->vm_manager.pasid_lock);
-	if (!vm)
+	if (!vm) {
+		/* VM not found, can't track fault credit */
+		spin_unlock(&adev->vm_manager.pasid_lock);
 		return true;
+	}
 
 	/* No lock needed. only accessed by IRQ handler */
-	if (!vm->fault_credit)
+	if (!vm->fault_credit) {
+		/* Too many faults in this VM */
+		spin_unlock(&adev->vm_manager.pasid_lock);
 		return false;
+	}
 
 	vm->fault_credit--;
+	spin_unlock(&adev->vm_manager.pasid_lock);
 	return true;
 }
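The old code dropped pasid_lock immediately after idr_find(), leaving a window in which the VM could be torn down before fault_credit was read or decremented. A generic userspace sketch of the rule the fix applies, with a hypothetical lookup table:

#include <pthread.h>
#include <stdbool.h>

struct obj { int credit; };

/* An object found in a lock-protected table must be used while the
 * lock is still held; otherwise a concurrent destroyer can free it
 * between the lookup and the access. */
static bool consume_credit(pthread_mutex_t *lock,
                           struct obj *(*find)(int id), int id)
{
        struct obj *o;
        bool ok = false;

        pthread_mutex_lock(lock);
        o = find(id);                   /* lookup under the lock */
        if (o && o->credit > 0) {
                o->credit--;            /* access still under the lock */
                ok = true;
        }
        pthread_mutex_unlock(lock);     /* only now may o be destroyed */
        return ok;
}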
@@ -1068,8 +1068,8 @@ static int gfx_v9_0_ngg_init(struct amdgpu_device *adev)
 	adev->gfx.ngg.gds_reserve_size = ALIGN(5 * 4, 0x40);
 	adev->gds.mem.total_size -= adev->gfx.ngg.gds_reserve_size;
 	adev->gds.mem.gfx_partition_size -= adev->gfx.ngg.gds_reserve_size;
-	adev->gfx.ngg.gds_reserve_addr = SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE);
-	adev->gfx.ngg.gds_reserve_addr += adev->gds.mem.gfx_partition_size;
+	adev->gfx.ngg.gds_reserve_addr = RREG32_SOC15(GC, 0, mmGDS_VMID0_BASE);
+	adev->gfx.ngg.gds_reserve_addr += RREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE);
 
 	/* Primitive Buffer */
 	r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PRIM],
@@ -1181,13 +1181,14 @@ static int gfx_v9_0_ngg_en(struct amdgpu_device *adev)
 
 	amdgpu_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
 	amdgpu_ring_write(ring, (PACKET3_DMA_DATA_CP_SYNC |
 				 PACKET3_DMA_DATA_DST_SEL(1) |
 				 PACKET3_DMA_DATA_SRC_SEL(2)));
 	amdgpu_ring_write(ring, 0);
 	amdgpu_ring_write(ring, 0);
 	amdgpu_ring_write(ring, adev->gfx.ngg.gds_reserve_addr);
 	amdgpu_ring_write(ring, 0);
-	amdgpu_ring_write(ring, adev->gfx.ngg.gds_reserve_size);
+	amdgpu_ring_write(ring, PACKET3_DMA_DATA_CMD_RAW_WAIT |
+			  adev->gfx.ngg.gds_reserve_size);
 
 	gfx_v9_0_write_data_to_reg(ring, 0, false,
 				   SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE), 0);
@@ -634,14 +634,16 @@ static int gmc_v9_0_late_init(void *handle)
 	for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
 		BUG_ON(vm_inv_eng[i] > 16);
 
-	r = gmc_v9_0_ecc_available(adev);
-	if (r == 1) {
-		DRM_INFO("ECC is active.\n");
-	} else if (r == 0) {
-		DRM_INFO("ECC is not present.\n");
-	} else {
-		DRM_ERROR("gmc_v9_0_ecc_available() failed. r: %d\n", r);
-		return r;
+	if (adev->asic_type == CHIP_VEGA10) {
+		r = gmc_v9_0_ecc_available(adev);
+		if (r == 1) {
+			DRM_INFO("ECC is active.\n");
+		} else if (r == 0) {
+			DRM_INFO("ECC is not present.\n");
+		} else {
+			DRM_ERROR("gmc_v9_0_ecc_available() failed. r: %d\n", r);
+			return r;
+		}
 	}
 
 	return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
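gmc_v9_0_ecc_available() follows a tri-state contract: 1 means ECC is active, 0 means absent, and a negative errno means the query itself failed; the fix restricts the query to Vega10, where it is meaningful. A small sketch of consuming such a contract, with a hypothetical probe:

#include <stdio.h>

static int report_feature(int (*probe)(void))
{
        int r = probe();

        if (r == 1)
                printf("feature is active\n");
        else if (r == 0)
                printf("feature is not present\n");
        else
                return r;       /* probe error, e.g. -EINVAL: propagate */
        return 0;
}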
@@ -666,8 +666,8 @@ static int soc15_common_early_init(void *handle)
 			AMD_CG_SUPPORT_MC_LS |
 			AMD_CG_SUPPORT_SDMA_MGCG |
 			AMD_CG_SUPPORT_SDMA_LS;
-		adev->pg_flags = AMD_PG_SUPPORT_SDMA |
-				 AMD_PG_SUPPORT_MMHUB;
+		adev->pg_flags = AMD_PG_SUPPORT_SDMA;
+
 		adev->external_rev_id = 0x1;
 		break;
 	default:
@@ -37,6 +37,9 @@
 #include "gmc/gmc_8_1_d.h"
 #include "vi.h"
 
+/* Polaris10/11/12 firmware version */
+#define FW_1_130_16 ((1 << 24) | (130 << 16) | (16 << 8))
+
 static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev);
 static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev);
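FW_1_130_16 packs major.minor.revision into a single integer so firmware versions can be compared with a plain >=. A standalone sketch of the same packing, with a hypothetical macro name:

#include <stdint.h>
#include <stdio.h>

/* pack major.minor.rev so newer versions compare numerically larger */
#define FW_VERSION(maj, min, rev) \
        (((maj) << 24) | ((min) << 16) | ((rev) << 8))

int main(void)
{
        uint32_t fw = FW_VERSION(1, 130, 16);

        printf("major=%u minor=%u rev=%u\n",
               (fw >> 24) & 0xff, (fw >> 16) & 0xff, (fw >> 8) & 0xff);
        return fw >= FW_VERSION(1, 130, 16) ? 0 : 1;
}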
@@ -58,7 +61,9 @@ static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
  */
 static inline bool uvd_v6_0_enc_support(struct amdgpu_device *adev)
 {
-	return ((adev->asic_type >= CHIP_POLARIS10) && (adev->asic_type <= CHIP_POLARIS12));
+	return ((adev->asic_type >= CHIP_POLARIS10) &&
+		(adev->asic_type <= CHIP_POLARIS12) &&
+		(!adev->uvd.fw_version || adev->uvd.fw_version >= FW_1_130_16));
 }
 
 /**
@@ -411,7 +416,15 @@ static int uvd_v6_0_sw_init(void *handle)
 	if (r)
 		return r;
 
-	if (uvd_v6_0_enc_support(adev)) {
+	if (!uvd_v6_0_enc_support(adev)) {
+		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
+			adev->uvd.ring_enc[i].funcs = NULL;
+
+		adev->uvd.irq.num_types = 1;
+		adev->uvd.num_enc_rings = 0;
+
+		DRM_INFO("UVD ENC is disabled\n");
+	} else {
 		struct drm_sched_rq *rq;
 		ring = &adev->uvd.ring_enc[0];
 		rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
@@ -387,7 +387,7 @@ static void init_transmitter_control(struct bios_parser *bp)
 		bp->cmd_tbl.transmitter_control = transmitter_control_v1_6;
 		break;
 	default:
-		dm_error("Don't have transmitter_control for v%d\n", crev);
+		dm_output_to_console("Don't have transmitter_control for v%d\n", crev);
 		bp->cmd_tbl.transmitter_control = NULL;
 		break;
 	}
@@ -911,7 +911,7 @@ static void init_set_pixel_clock(struct bios_parser *bp)
 		bp->cmd_tbl.set_pixel_clock = set_pixel_clock_v7;
 		break;
 	default:
-		dm_error("Don't have set_pixel_clock for v%d\n",
+		dm_output_to_console("Don't have set_pixel_clock for v%d\n",
			BIOS_CMD_TABLE_PARA_REVISION(SetPixelClock));
 		bp->cmd_tbl.set_pixel_clock = NULL;
 		break;
@@ -1230,7 +1230,7 @@ static void init_enable_spread_spectrum_on_ppll(struct bios_parser *bp)
 			enable_spread_spectrum_on_ppll_v3;
 		break;
 	default:
-		dm_error("Don't have enable_spread_spectrum_on_ppll for v%d\n",
+		dm_output_to_console("Don't have enable_spread_spectrum_on_ppll for v%d\n",
			BIOS_CMD_TABLE_PARA_REVISION(EnableSpreadSpectrumOnPPLL));
 		bp->cmd_tbl.enable_spread_spectrum_on_ppll = NULL;
 		break;
@@ -1427,7 +1427,7 @@ static void init_adjust_display_pll(struct bios_parser *bp)
 		bp->cmd_tbl.adjust_display_pll = adjust_display_pll_v3;
 		break;
 	default:
-		dm_error("Don't have adjust_display_pll for v%d\n",
+		dm_output_to_console("Don't have adjust_display_pll for v%d\n",
			BIOS_CMD_TABLE_PARA_REVISION(AdjustDisplayPll));
 		bp->cmd_tbl.adjust_display_pll = NULL;
 		break;
@@ -1702,7 +1702,7 @@ static void init_set_crtc_timing(struct bios_parser *bp)
 			set_crtc_using_dtd_timing_v3;
 		break;
 	default:
-		dm_error("Don't have set_crtc_timing for dtd v%d\n",
+		dm_output_to_console("Don't have set_crtc_timing for dtd v%d\n",
			dtd_version);
 		bp->cmd_tbl.set_crtc_timing = NULL;
 		break;
@@ -1713,7 +1713,7 @@ static void init_set_crtc_timing(struct bios_parser *bp)
 		bp->cmd_tbl.set_crtc_timing = set_crtc_timing_v1;
 		break;
 	default:
-		dm_error("Don't have set_crtc_timing for v%d\n",
+		dm_output_to_console("Don't have set_crtc_timing for v%d\n",
			BIOS_CMD_TABLE_PARA_REVISION(SetCRTC_Timing));
 		bp->cmd_tbl.set_crtc_timing = NULL;
 		break;
@@ -1901,7 +1901,7 @@ static void init_select_crtc_source(struct bios_parser *bp)
 		bp->cmd_tbl.select_crtc_source = select_crtc_source_v3;
 		break;
 	default:
-		dm_error("Don't select_crtc_source enable_crtc for v%d\n",
+		dm_output_to_console("Don't select_crtc_source enable_crtc for v%d\n",
			BIOS_CMD_TABLE_PARA_REVISION(SelectCRTC_Source));
 		bp->cmd_tbl.select_crtc_source = NULL;
 		break;
@@ -2010,7 +2010,7 @@ static void init_enable_crtc(struct bios_parser *bp)
 		bp->cmd_tbl.enable_crtc = enable_crtc_v1;
 		break;
 	default:
-		dm_error("Don't have enable_crtc for v%d\n",
+		dm_output_to_console("Don't have enable_crtc for v%d\n",
			BIOS_CMD_TABLE_PARA_REVISION(EnableCRTC));
 		bp->cmd_tbl.enable_crtc = NULL;
 		break;
@@ -2118,7 +2118,7 @@ static void init_program_clock(struct bios_parser *bp)
 		bp->cmd_tbl.program_clock = program_clock_v6;
 		break;
 	default:
-		dm_error("Don't have program_clock for v%d\n",
+		dm_output_to_console("Don't have program_clock for v%d\n",
			BIOS_CMD_TABLE_PARA_REVISION(SetPixelClock));
 		bp->cmd_tbl.program_clock = NULL;
 		break;
@@ -2341,7 +2341,7 @@ static void init_enable_disp_power_gating(
 			enable_disp_power_gating_v2_1;
 		break;
 	default:
-		dm_error("Don't enable_disp_power_gating enable_crtc for v%d\n",
+		dm_output_to_console("Don't enable_disp_power_gating enable_crtc for v%d\n",
			BIOS_CMD_TABLE_PARA_REVISION(EnableDispPowerGating));
 		bp->cmd_tbl.enable_disp_power_gating = NULL;
 		break;
@@ -2390,7 +2390,7 @@ static void init_set_dce_clock(struct bios_parser *bp)
 		bp->cmd_tbl.set_dce_clock = set_dce_clock_v2_1;
 		break;
 	default:
-		dm_error("Don't have set_dce_clock for v%d\n",
+		dm_output_to_console("Don't have set_dce_clock for v%d\n",
			BIOS_CMD_TABLE_PARA_REVISION(SetDCEClock));
 		bp->cmd_tbl.set_dce_clock = NULL;
 		break;
@@ -118,7 +118,7 @@ static void init_dig_encoder_control(struct bios_parser *bp)
 		bp->cmd_tbl.dig_encoder_control = encoder_control_digx_v1_5;
 		break;
 	default:
-		dm_error("Don't have dig_encoder_control for v%d\n", version);
+		dm_output_to_console("Don't have dig_encoder_control for v%d\n", version);
 		bp->cmd_tbl.dig_encoder_control = NULL;
 		break;
 	}
@@ -206,7 +206,7 @@ static void init_transmitter_control(struct bios_parser *bp)
 		bp->cmd_tbl.transmitter_control = transmitter_control_v1_6;
 		break;
 	default:
-		dm_error("Don't have transmitter_control for v%d\n", crev);
+		dm_output_to_console("Don't have transmitter_control for v%d\n", crev);
 		bp->cmd_tbl.transmitter_control = NULL;
 		break;
 	}
@@ -270,7 +270,7 @@ static void init_set_pixel_clock(struct bios_parser *bp)
 		bp->cmd_tbl.set_pixel_clock = set_pixel_clock_v7;
 		break;
 	default:
-		dm_error("Don't have set_pixel_clock for v%d\n",
+		dm_output_to_console("Don't have set_pixel_clock for v%d\n",
			BIOS_CMD_TABLE_PARA_REVISION(setpixelclock));
 		bp->cmd_tbl.set_pixel_clock = NULL;
 		break;
@@ -383,7 +383,7 @@ static void init_set_crtc_timing(struct bios_parser *bp)
 			set_crtc_using_dtd_timing_v3;
 		break;
 	default:
-		dm_error("Don't have set_crtc_timing for v%d\n", dtd_version);
+		dm_output_to_console("Don't have set_crtc_timing for v%d\n", dtd_version);
 		bp->cmd_tbl.set_crtc_timing = NULL;
 		break;
 	}
@@ -503,7 +503,7 @@ static void init_select_crtc_source(struct bios_parser *bp)
 		bp->cmd_tbl.select_crtc_source = select_crtc_source_v3;
 		break;
 	default:
-		dm_error("Don't select_crtc_source enable_crtc for v%d\n",
+		dm_output_to_console("Don't select_crtc_source enable_crtc for v%d\n",
			BIOS_CMD_TABLE_PARA_REVISION(selectcrtc_source));
 		bp->cmd_tbl.select_crtc_source = NULL;
 		break;
@@ -572,7 +572,7 @@ static void init_enable_crtc(struct bios_parser *bp)
 		bp->cmd_tbl.enable_crtc = enable_crtc_v1;
 		break;
 	default:
-		dm_error("Don't have enable_crtc for v%d\n",
+		dm_output_to_console("Don't have enable_crtc for v%d\n",
			BIOS_CMD_TABLE_PARA_REVISION(enablecrtc));
 		bp->cmd_tbl.enable_crtc = NULL;
 		break;
@@ -670,7 +670,7 @@ static void init_enable_disp_power_gating(
 			enable_disp_power_gating_v2_1;
 		break;
 	default:
-		dm_error("Don't enable_disp_power_gating enable_crtc for v%d\n",
+		dm_output_to_console("Don't enable_disp_power_gating enable_crtc for v%d\n",
			BIOS_CMD_TABLE_PARA_REVISION(enabledisppowergating));
 		bp->cmd_tbl.enable_disp_power_gating = NULL;
 		break;
@@ -721,7 +721,7 @@ static void init_set_dce_clock(struct bios_parser *bp)
 		bp->cmd_tbl.set_dce_clock = set_dce_clock_v2_1;
 		break;
 	default:
-		dm_error("Don't have set_dce_clock for v%d\n",
+		dm_output_to_console("Don't have set_dce_clock for v%d\n",
			BIOS_CMD_TABLE_PARA_REVISION(setdceclock));
 		bp->cmd_tbl.set_dce_clock = NULL;
 		break;
@@ -40,7 +40,7 @@ struct smu_table_entry {
 	uint32_t table_addr_high;
 	uint32_t table_addr_low;
 	uint8_t *table;
-	uint32_t handle;
+	unsigned long handle;
 };
 
 struct smu_table_array {
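The handle field holds a kernel pointer cast to an integer; uint32_t truncates it on 64-bit builds, while unsigned long is pointer-sized on Linux. A compact sketch of the bug class, with hypothetical structs:

#include <stdint.h>

struct bad_entry  { uint32_t handle; };      /* drops the top 32 bits */
struct good_entry { unsigned long handle; }; /* pointer-sized on Linux */

static void *round_trip(struct good_entry *e, void *bo)
{
        e->handle = (unsigned long)bo;  /* no bits lost */
        return (void *)e->handle;       /* == bo on all Linux targets */
}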
@@ -3513,6 +3513,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
 		tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
 						EVERGREEN_MAX_BACKENDS, disabled_rb_mask);
 	}
+	rdev->config.evergreen.backend_map = tmp;
 	WREG32(GB_BACKEND_MAP, tmp);
 
 	WREG32(CGTS_SYS_TCC_DISABLE, 0);
@@ -1148,6 +1148,7 @@ static void cayman_gpu_init(struct radeon_device *rdev)
 					    rdev->config.cayman.max_shader_engines,
 					    CAYMAN_MAX_BACKENDS, disabled_rb_mask);
 	}
+	rdev->config.cayman.backend_map = tmp;
 	WREG32(GB_BACKEND_MAP, tmp);
 
 	cgts_tcc_disable = 0xffff0000;
@@ -328,7 +328,7 @@ static void radeon_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
 	drm_kms_helper_hotplug_event(dev);
 }
 
-const struct drm_dp_mst_topology_cbs mst_cbs = {
+static const struct drm_dp_mst_topology_cbs mst_cbs = {
 	.add_connector = radeon_dp_add_mst_connector,
 	.register_connector = radeon_dp_register_mst_connector,
 	.destroy_connector = radeon_dp_destroy_mst_connector,
@@ -170,7 +170,8 @@ void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
 		list_add_tail(&bo->lru, &man->lru[bo->priority]);
 		kref_get(&bo->list_kref);
 
-		if (bo->ttm && !(bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
+		if (bo->ttm && !(bo->ttm->page_flags &
+				 (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED))) {
 			list_add_tail(&bo->swap,
 				      &bo->glob->swap_lru[bo->priority]);
 			kref_get(&bo->list_kref);
@@ -1779,8 +1780,8 @@ out:
 	 * Unreserve without putting on LRU to avoid swapping out an
 	 * already swapped buffer.
 	 */
-
-	reservation_object_unlock(bo->resv);
+	if (locked)
+		reservation_object_unlock(bo->resv);
 	kref_put(&bo->list_kref, ttm_bo_release_list);
 	return ret;
 }
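The swapout path only sometimes owns the reservation, so unlocking unconditionally could release a lock the function never took. A generic userspace sketch of tracking lock ownership with a flag, not TTM code:

#include <pthread.h>
#include <stdbool.h>

static void do_work(pthread_mutex_t *m, bool need_lock)
{
        bool locked = false;

        if (need_lock && pthread_mutex_trylock(m) == 0)
                locked = true;

        /* ... work that tolerates either state ... */

        if (locked)                     /* release only what was taken */
                pthread_mutex_unlock(m);
}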
@@ -477,12 +477,12 @@ ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
 	return count;
 }
 
-static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
+static int ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
 {
 	manager->mm_shrink.count_objects = ttm_pool_shrink_count;
 	manager->mm_shrink.scan_objects = ttm_pool_shrink_scan;
 	manager->mm_shrink.seeks = 1;
-	register_shrinker(&manager->mm_shrink);
+	return register_shrinker(&manager->mm_shrink);
 }
 
 static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
@@ -1034,15 +1034,18 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
 
 	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
 				   &glob->kobj, "pool");
-	if (unlikely(ret != 0)) {
-		kobject_put(&_manager->kobj);
-		_manager = NULL;
-		return ret;
-	}
+	if (unlikely(ret != 0))
+		goto error;
 
-	ttm_pool_mm_shrink_init(_manager);
+	ret = ttm_pool_mm_shrink_init(_manager);
+	if (unlikely(ret != 0))
+		goto error;
 	return 0;
+
+error:
+	kobject_put(&_manager->kobj);
+	_manager = NULL;
+	return ret;
 }
 
 void ttm_page_alloc_fini(void)
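register_shrinker() allocates internal state and can fail, which is why both init paths above now propagate its return value. A minimal sketch against the shrinker API of this kernel era, with a hypothetical shrinker:

#include <linux/shrinker.h>

static unsigned long demo_count(struct shrinker *s, struct shrink_control *sc)
{
        return 0;               /* nothing to reclaim in this sketch */
}

static unsigned long demo_scan(struct shrinker *s, struct shrink_control *sc)
{
        return SHRINK_STOP;     /* no progress possible */
}

static struct shrinker demo_shrinker = {
        .count_objects = demo_count,
        .scan_objects = demo_scan,
        .seeks = DEFAULT_SEEKS,
};

static int demo_init(void)
{
        /* may fail with -ENOMEM; the caller must check */
        return register_shrinker(&demo_shrinker);
}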
@@ -1072,7 +1075,8 @@ int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 	ret = ttm_get_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
 			    ttm->caching_state);
 	if (unlikely(ret != 0)) {
-		ttm_pool_unpopulate(ttm);
+		ttm_put_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
+			      ttm->caching_state);
 		return ret;
 	}
 
@@ -1080,7 +1084,8 @@ int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
 						PAGE_SIZE, ctx);
 		if (unlikely(ret != 0)) {
-			ttm_pool_unpopulate(ttm);
+			ttm_put_pages(ttm->pages, ttm->num_pages,
+				      ttm->page_flags, ttm->caching_state);
 			return -ENOMEM;
 		}
 	}
@@ -61,6 +61,7 @@
 #define SMALL_ALLOCATION		4
 #define FREE_ALL_PAGES			(~0U)
 #define VADDR_FLAG_HUGE_POOL		1UL
+#define VADDR_FLAG_UPDATED_COUNT	2UL
 
 enum pool_type {
 	IS_UNDEFINED = 0,
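dma_page->vaddr holds a page-aligned address, so its low bits are always zero and can carry flag bits such as VADDR_FLAG_HUGE_POOL and the new VADDR_FLAG_UPDATED_COUNT. A standalone sketch of the tagging trick, with hypothetical helpers:

#include <stdint.h>

#define FLAG_HUGE_POOL          1UL
#define FLAG_UPDATED_COUNT      2UL
#define FLAG_MASK               (FLAG_HUGE_POOL | FLAG_UPDATED_COUNT)

/* a page-aligned address has zero low bits, so two flag bits can
 * ride along in the same word without extra storage */
static inline void *tagged_ptr(uintptr_t vaddr)
{
        return (void *)(vaddr & ~FLAG_MASK);    /* strip flags before use */
}

static inline uintptr_t set_flag(uintptr_t vaddr, uintptr_t flag)
{
        return vaddr | flag;                    /* e.g. FLAG_UPDATED_COUNT */
}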
@@ -874,18 +875,18 @@ static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
 }
 
 /*
- * @return count of pages still required to fulfill the request.
  * The populate list is actually a stack (not that is matters as TTM
  * allocates one page at a time.
+ * return dma_page pointer if success, otherwise NULL.
 */
-static int ttm_dma_pool_get_pages(struct dma_pool *pool,
+static struct dma_page *ttm_dma_pool_get_pages(struct dma_pool *pool,
 				  struct ttm_dma_tt *ttm_dma,
 				  unsigned index)
 {
-	struct dma_page *d_page;
+	struct dma_page *d_page = NULL;
 	struct ttm_tt *ttm = &ttm_dma->ttm;
 	unsigned long irq_flags;
-	int count, r = -ENOMEM;
+	int count;
 
 	spin_lock_irqsave(&pool->lock, irq_flags);
 	count = ttm_dma_page_pool_fill_locked(pool, &irq_flags);
@@ -894,12 +895,11 @@ static int ttm_dma_pool_get_pages(struct dma_pool *pool,
 		ttm->pages[index] = d_page->p;
 		ttm_dma->dma_address[index] = d_page->dma;
 		list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
-		r = 0;
 		pool->npages_in_use += 1;
 		pool->npages_free -= 1;
 	}
 	spin_unlock_irqrestore(&pool->lock, irq_flags);
-	return r;
+	return d_page;
 }
 
 static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
@@ -934,6 +934,7 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
 	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
 	unsigned long num_pages = ttm->num_pages;
 	struct dma_pool *pool;
+	struct dma_page *d_page;
 	enum pool_type type;
 	unsigned i;
 	int ret;
@@ -962,8 +963,8 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
 		while (num_pages >= HPAGE_PMD_NR) {
 			unsigned j;
 
-			ret = ttm_dma_pool_get_pages(pool, ttm_dma, i);
-			if (ret != 0)
+			d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i);
+			if (!d_page)
 				break;
 
 			ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
@@ -973,6 +974,7 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
 				return -ENOMEM;
 			}
 
+			d_page->vaddr |= VADDR_FLAG_UPDATED_COUNT;
 			for (j = i + 1; j < (i + HPAGE_PMD_NR); ++j) {
 				ttm->pages[j] = ttm->pages[j - 1] + 1;
 				ttm_dma->dma_address[j] = ttm_dma->dma_address[j - 1] +
@@ -996,8 +998,8 @@ skip_huge:
 	}
 
 	while (num_pages) {
-		ret = ttm_dma_pool_get_pages(pool, ttm_dma, i);
-		if (ret != 0) {
+		d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i);
+		if (!d_page) {
 			ttm_dma_unpopulate(ttm_dma, dev);
 			return -ENOMEM;
 		}
@@ -1009,6 +1011,7 @@ skip_huge:
 			return -ENOMEM;
 		}
 
+		d_page->vaddr |= VADDR_FLAG_UPDATED_COUNT;
 		++i;
 		--num_pages;
 	}
@@ -1049,8 +1052,11 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 				continue;
 
 			count++;
-			ttm_mem_global_free_page(ttm->glob->mem_glob,
-						 d_page->p, pool->size);
+			if (d_page->vaddr & VADDR_FLAG_UPDATED_COUNT) {
+				ttm_mem_global_free_page(ttm->glob->mem_glob,
+							 d_page->p, pool->size);
+				d_page->vaddr &= ~VADDR_FLAG_UPDATED_COUNT;
+			}
 			ttm_dma_page_put(pool, d_page);
 		}
 
@@ -1070,9 +1076,19 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 
 	/* make sure pages array match list and count number of pages */
 	count = 0;
-	list_for_each_entry(d_page, &ttm_dma->pages_list, page_list) {
+	list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list,
+				 page_list) {
 		ttm->pages[count] = d_page->p;
 		count++;
+
+		if (d_page->vaddr & VADDR_FLAG_UPDATED_COUNT) {
+			ttm_mem_global_free_page(ttm->glob->mem_glob,
+						 d_page->p, pool->size);
+			d_page->vaddr &= ~VADDR_FLAG_UPDATED_COUNT;
+		}
+
+		if (is_cached)
+			ttm_dma_page_put(pool, d_page);
 	}
 
 	spin_lock_irqsave(&pool->lock, irq_flags);
@@ -1092,19 +1108,6 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 	}
 	spin_unlock_irqrestore(&pool->lock, irq_flags);
 
-	if (is_cached) {
-		list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list, page_list) {
-			ttm_mem_global_free_page(ttm->glob->mem_glob,
-						 d_page->p, pool->size);
-			ttm_dma_page_put(pool, d_page);
-		}
-	} else {
-		for (i = 0; i < count; i++) {
-			ttm_mem_global_free_page(ttm->glob->mem_glob,
-						 ttm->pages[i], pool->size);
-		}
-	}
-
 	INIT_LIST_HEAD(&ttm_dma->pages_list);
 	for (i = 0; i < ttm->num_pages; i++) {
 		ttm->pages[i] = NULL;
@@ -1182,12 +1185,12 @@ ttm_dma_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
 	return count;
 }
 
-static void ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
+static int ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
 {
 	manager->mm_shrink.count_objects = ttm_dma_pool_shrink_count;
 	manager->mm_shrink.scan_objects = &ttm_dma_pool_shrink_scan;
 	manager->mm_shrink.seeks = 1;
-	register_shrinker(&manager->mm_shrink);
+	return register_shrinker(&manager->mm_shrink);
 }
 
 static void ttm_dma_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
@@ -1197,7 +1200,7 @@ static void ttm_dma_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
 
 int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
 {
-	int ret = -ENOMEM;
+	int ret;
 
 	WARN_ON(_manager);
 
@@ -1205,7 +1208,7 @@ int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
 
 	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
 	if (!_manager)
-		goto err;
+		return -ENOMEM;
 
 	mutex_init(&_manager->lock);
 	INIT_LIST_HEAD(&_manager->pools);
@@ -1217,13 +1220,17 @@ int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
 	/* This takes care of auto-freeing the _manager */
 	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
 				   &glob->kobj, "dma_pool");
-	if (unlikely(ret != 0)) {
-		kobject_put(&_manager->kobj);
-		goto err;
-	}
-	ttm_dma_pool_mm_shrink_init(_manager);
+	if (unlikely(ret != 0))
+		goto error;
+
+	ret = ttm_dma_pool_mm_shrink_init(_manager);
+	if (unlikely(ret != 0))
+		goto error;
 	return 0;
-err:
+
+error:
+	kobject_put(&_manager->kobj);
+	_manager = NULL;
 	return ret;
 }