Merge tag 'drm-intel-next-2023-03-23' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

Core Changes:
- drm: Add SDP Error Detection Configuration Register (Arun)

Driver Changes:
- Meteor Lake enabling and fixes (RK, Jose, Madhumitha)
- Lock the fbdev obj before vma pin (Tejas)
- DSC fixes (Stanislav)
- Fixes and clean-up on opregion code (Imre)
- More wm/vblank stuff (Ville)
- More general display code organization (Jani)
- DP Fixes (Stanislav, Ville)
- Introduce flags to ignore long HPD and link training issues, to handle spurious failures on CI (Vinod)
- Plane cleanups and extra registers (Ville)
- Update audio keepalive clock values (Clint)
- Rename find_section to bdb_find_section (Maarten)
- DP SDP CRC16 for 128b132b link layer (Arun)
- Fix various issues with noarm register writes (Ville)
- Fix a few TypeC / MST issues (Imre)
- Create GSC submission targeting HDCP and PXP usages on MTL+ (Suraj)
- Enable HDCP2.x via GSC CS (Suraj)

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
From: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/ZBy56qc9C00tCLOY@intel.com
Daniel Vetter 2023-03-24 20:22:02 +01:00
commit 7ed3492725
58 changed files with 3327 additions and 1472 deletions

View File

@ -195,6 +195,7 @@ i915-y += \
i915-y += \
gt/uc/intel_gsc_fw.o \
gt/uc/intel_gsc_uc.o \
gt/uc/intel_gsc_uc_heci_cmd_submit.o\
gt/uc/intel_guc.o \
gt/uc/intel_guc_ads.o \
gt/uc/intel_guc_capture.o \
@ -255,6 +256,7 @@ i915-y += \
display/intel_frontbuffer.o \
display/intel_global_state.o \
display/intel_hdcp.o \
display/intel_hdcp_gsc.o \
display/intel_hotplug.o \
display/intel_hti.o \
display/intel_lpe_audio.o \
@ -267,6 +269,7 @@ i915-y += \
display/intel_psr.o \
display/intel_quirks.o \
display/intel_sprite.o \
display/intel_sprite_uapi.o \
display/intel_tc.o \
display/intel_vblank.o \
display/intel_vga.o \

View File

@ -267,3 +267,40 @@ void hsw_ips_get_config(struct intel_crtc_state *crtc_state)
crtc_state->ips_enabled = true;
}
}
static int hsw_ips_debugfs_status_show(struct seq_file *m, void *unused)
{
struct drm_i915_private *i915 = m->private;
intel_wakeref_t wakeref;
if (!HAS_IPS(i915))
return -ENODEV;
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
seq_printf(m, "Enabled by kernel parameter: %s\n",
str_yes_no(i915->params.enable_ips));
if (DISPLAY_VER(i915) >= 8) {
seq_puts(m, "Currently: unknown\n");
} else {
if (intel_de_read(i915, IPS_CTL) & IPS_ENABLE)
seq_puts(m, "Currently: enabled\n");
else
seq_puts(m, "Currently: disabled\n");
}
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(hsw_ips_debugfs_status);
void hsw_ips_debugfs_register(struct drm_i915_private *i915)
{
struct drm_minor *minor = i915->drm.primary;
debugfs_create_file("i915_ips_status", 0444, minor->debugfs_root,
i915, &hsw_ips_debugfs_status_fops);
}
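This hunk, together with the hsw_ips_debugfs_register() call added to intel_display_debugfs.c further down in this commit, moves the IPS status readout out of the central debugfs list: the i915_ips_status implementation removed from intel_display_debugfs.c below is recreated here as hsw_ips_debugfs_status_show(). Once registered, the i915_ips_status file appears under the card's DRM debugfs directory (typically /sys/kernel/debug/dri/<minor>/, the exact path depends on the system) and reports whether IPS is currently enabled.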

View File

@ -8,6 +8,7 @@
#include <linux/types.h>
struct drm_i915_private;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
@ -22,5 +23,6 @@ bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state);
int hsw_ips_compute_config(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void hsw_ips_get_config(struct intel_crtc_state *crtc_state);
void hsw_ips_debugfs_register(struct drm_i915_private *i915);
#endif /* __HSW_IPS_H__ */

View File

@ -1500,7 +1500,7 @@ static void gen11_dsi_get_config(struct intel_encoder *encoder,
gen11_dsi_get_timings(encoder, pipe_config);
pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI);
pipe_config->pipe_bpp = bdw_get_pipemisc_bpp(crtc);
pipe_config->pipe_bpp = bdw_get_pipe_misc_bpp(crtc);
/* Get the details on which TE should be enabled */
if (is_cmd_mode(intel_dsi))

View File

@ -32,6 +32,7 @@
*/
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
#include "i915_config.h"
@ -42,7 +43,6 @@
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_fb_pin.h"
#include "intel_sprite.h"
#include "skl_scaler.h"
#include "skl_watermark.h"
@ -940,6 +940,64 @@ int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state,
return 0;
}
int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
{
struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
struct drm_rect *src = &plane_state->uapi.src;
u32 src_x, src_y, src_w, src_h, hsub, vsub;
bool rotated = drm_rotation_90_or_270(plane_state->hw.rotation);
/*
* FIXME hsub/vsub vs. block size is a mess. Pre-tgl CCS
* abuses hsub/vsub so we can't use them here. But as they
* are limited to 32bpp RGB formats we don't actually need
* to check anything.
*/
if (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS)
return 0;
/*
* Hardware doesn't handle subpixel coordinates.
* Adjust to (macro)pixel boundary, but be careful not to
* increase the source viewport size, because that could
* push the downscaling factor out of bounds.
*/
src_x = src->x1 >> 16;
src_w = drm_rect_width(src) >> 16;
src_y = src->y1 >> 16;
src_h = drm_rect_height(src) >> 16;
drm_rect_init(src, src_x << 16, src_y << 16,
src_w << 16, src_h << 16);
if (fb->format->format == DRM_FORMAT_RGB565 && rotated) {
hsub = 2;
vsub = 2;
} else {
hsub = fb->format->hsub;
vsub = fb->format->vsub;
}
if (rotated)
hsub = vsub = max(hsub, vsub);
if (src_x % hsub || src_w % hsub) {
drm_dbg_kms(&i915->drm, "src x/w (%u, %u) must be a multiple of %u (rotated: %s)\n",
src_x, src_w, hsub, str_yes_no(rotated));
return -EINVAL;
}
if (src_y % vsub || src_h % vsub) {
drm_dbg_kms(&i915->drm, "src y/h (%u, %u) must be a multiple of %u (rotated: %s)\n",
src_y, src_h, vsub, str_yes_no(rotated));
return -EINVAL;
}
return 0;
}
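A concrete illustration of the subsampling checks above (numbers hypothetical): a 90 degree rotated RGB565 framebuffer gets an effective hsub = vsub = 2, so a source rectangle with src_x = 3 or src_w = 101 full pixels is rejected, while the same rectangle on an unrotated RGB565 plane (hsub = vsub = 1) passes. For a natively subsampled format such as NV12 (hsub = vsub = 2) the multiple-of-two rule applies whether or not the plane is rotated.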
/**
* intel_prepare_plane_fb - Prepare fb for usage on plane
* @_plane: drm plane to prepare for

View File

@ -62,6 +62,7 @@ int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state,
struct intel_crtc_state *crtc_state,
int min_scale, int max_scale,
bool can_position);
int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state);
void intel_plane_set_invisible(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state);
void intel_plane_helper_add(struct intel_plane *plane);

View File

@ -983,11 +983,7 @@ void intel_audio_cdclk_change_pre(struct drm_i915_private *i915)
static void get_aud_ts_cdclk_m_n(int refclk, int cdclk, struct aud_ts_cdclk_m_n *aud_ts)
{
if (refclk == 24000)
aud_ts->m = 12;
else
aud_ts->m = 15;
aud_ts->m = 60;
aud_ts->n = cdclk * aud_ts->m / 24000;
}
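
The keepalive M value is now a constant 60 rather than being picked per reference clock, so N scales linearly with CDCLK. A minimal standalone sketch of the new math, for illustration only (the helper name, the 480000 kHz CDCLK value and the kHz interpretation of the units are assumptions, not part of the patch):

#include <stdio.h>

struct aud_ts_cdclk_m_n { int m, n; };

/* Mirrors the new derivation in the hunk above: fixed M, N scales with CDCLK. */
static void example_get_aud_ts_cdclk_m_n(int cdclk, struct aud_ts_cdclk_m_n *aud_ts)
{
	aud_ts->m = 60;				/* no longer depends on refclk */
	aud_ts->n = cdclk * aud_ts->m / 24000;	/* 24000 kHz = 24 MHz reference */
}

int main(void)
{
	struct aud_ts_cdclk_m_n aud_ts;

	example_get_aud_ts_cdclk_m_n(480000, &aud_ts);	/* hypothetical 480 MHz CDCLK */
	printf("M=%d N=%d\n", aud_ts.m, aud_ts.n);	/* prints M=60 N=1200 */
	return 0;
}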

View File

@ -141,8 +141,8 @@ struct bdb_block_entry {
};
static const void *
find_section(struct drm_i915_private *i915,
enum bdb_block_id section_id)
bdb_find_section(struct drm_i915_private *i915,
enum bdb_block_id section_id)
{
struct bdb_block_entry *entry;
@ -201,7 +201,7 @@ static size_t lfp_data_min_size(struct drm_i915_private *i915)
const struct bdb_lvds_lfp_data_ptrs *ptrs;
size_t size;
ptrs = find_section(i915, BDB_LVDS_LFP_DATA_PTRS);
ptrs = bdb_find_section(i915, BDB_LVDS_LFP_DATA_PTRS);
if (!ptrs)
return 0;
@ -630,7 +630,7 @@ static int vbt_get_panel_type(struct drm_i915_private *i915,
{
const struct bdb_lvds_options *lvds_options;
lvds_options = find_section(i915, BDB_LVDS_OPTIONS);
lvds_options = bdb_find_section(i915, BDB_LVDS_OPTIONS);
if (!lvds_options)
return -1;
@ -671,11 +671,11 @@ static int pnpid_get_panel_type(struct drm_i915_private *i915,
dump_pnp_id(i915, edid_id, "EDID");
ptrs = find_section(i915, BDB_LVDS_LFP_DATA_PTRS);
ptrs = bdb_find_section(i915, BDB_LVDS_LFP_DATA_PTRS);
if (!ptrs)
return -1;
data = find_section(i915, BDB_LVDS_LFP_DATA);
data = bdb_find_section(i915, BDB_LVDS_LFP_DATA);
if (!data)
return -1;
@ -791,7 +791,7 @@ parse_panel_options(struct drm_i915_private *i915,
int panel_type = panel->vbt.panel_type;
int drrs_mode;
lvds_options = find_section(i915, BDB_LVDS_OPTIONS);
lvds_options = bdb_find_section(i915, BDB_LVDS_OPTIONS);
if (!lvds_options)
return;
@ -881,11 +881,11 @@ parse_lfp_data(struct drm_i915_private *i915,
const struct lvds_pnp_id *pnp_id;
int panel_type = panel->vbt.panel_type;
ptrs = find_section(i915, BDB_LVDS_LFP_DATA_PTRS);
ptrs = bdb_find_section(i915, BDB_LVDS_LFP_DATA_PTRS);
if (!ptrs)
return;
data = find_section(i915, BDB_LVDS_LFP_DATA);
data = bdb_find_section(i915, BDB_LVDS_LFP_DATA);
if (!data)
return;
@ -932,7 +932,7 @@ parse_generic_dtd(struct drm_i915_private *i915,
if (i915->display.vbt.version < 229)
return;
generic_dtd = find_section(i915, BDB_GENERIC_DTD);
generic_dtd = bdb_find_section(i915, BDB_GENERIC_DTD);
if (!generic_dtd)
return;
@ -1011,7 +1011,7 @@ parse_lfp_backlight(struct drm_i915_private *i915,
int panel_type = panel->vbt.panel_type;
u16 level;
backlight_data = find_section(i915, BDB_LVDS_BACKLIGHT);
backlight_data = bdb_find_section(i915, BDB_LVDS_BACKLIGHT);
if (!backlight_data)
return;
@ -1119,14 +1119,14 @@ parse_sdvo_panel_data(struct drm_i915_private *i915,
if (index == -1) {
const struct bdb_sdvo_lvds_options *sdvo_lvds_options;
sdvo_lvds_options = find_section(i915, BDB_SDVO_LVDS_OPTIONS);
sdvo_lvds_options = bdb_find_section(i915, BDB_SDVO_LVDS_OPTIONS);
if (!sdvo_lvds_options)
return;
index = sdvo_lvds_options->panel_type;
}
dtds = find_section(i915, BDB_SDVO_PANEL_DTDS);
dtds = bdb_find_section(i915, BDB_SDVO_PANEL_DTDS);
if (!dtds)
return;
@ -1162,7 +1162,7 @@ parse_general_features(struct drm_i915_private *i915)
{
const struct bdb_general_features *general;
general = find_section(i915, BDB_GENERAL_FEATURES);
general = bdb_find_section(i915, BDB_GENERAL_FEATURES);
if (!general)
return;
@ -1285,7 +1285,7 @@ parse_driver_features(struct drm_i915_private *i915)
{
const struct bdb_driver_features *driver;
driver = find_section(i915, BDB_DRIVER_FEATURES);
driver = bdb_find_section(i915, BDB_DRIVER_FEATURES);
if (!driver)
return;
@ -1322,7 +1322,7 @@ parse_panel_driver_features(struct drm_i915_private *i915,
{
const struct bdb_driver_features *driver;
driver = find_section(i915, BDB_DRIVER_FEATURES);
driver = bdb_find_section(i915, BDB_DRIVER_FEATURES);
if (!driver)
return;
@ -1362,7 +1362,7 @@ parse_power_conservation_features(struct drm_i915_private *i915,
if (i915->display.vbt.version < 228)
return;
power = find_section(i915, BDB_LFP_POWER);
power = bdb_find_section(i915, BDB_LFP_POWER);
if (!power)
return;
@ -1402,7 +1402,7 @@ parse_edp(struct drm_i915_private *i915,
const struct edp_fast_link_params *edp_link_params;
int panel_type = panel->vbt.panel_type;
edp = find_section(i915, BDB_EDP);
edp = bdb_find_section(i915, BDB_EDP);
if (!edp)
return;
@ -1532,7 +1532,7 @@ parse_psr(struct drm_i915_private *i915,
const struct psr_table *psr_table;
int panel_type = panel->vbt.panel_type;
psr = find_section(i915, BDB_PSR);
psr = bdb_find_section(i915, BDB_PSR);
if (!psr) {
drm_dbg_kms(&i915->drm, "No PSR BDB found.\n");
return;
@ -1693,7 +1693,7 @@ parse_mipi_config(struct drm_i915_private *i915,
/* Parse #52 for panel index used from panel_type already
* parsed
*/
start = find_section(i915, BDB_MIPI_CONFIG);
start = bdb_find_section(i915, BDB_MIPI_CONFIG);
if (!start) {
drm_dbg_kms(&i915->drm, "No MIPI config BDB found");
return;
@ -2005,7 +2005,7 @@ parse_mipi_sequence(struct drm_i915_private *i915,
if (panel->vbt.dsi.panel_id != MIPI_DSI_GENERIC_PANEL_ID)
return;
sequence = find_section(i915, BDB_MIPI_SEQUENCE);
sequence = bdb_find_section(i915, BDB_MIPI_SEQUENCE);
if (!sequence) {
drm_dbg_kms(&i915->drm,
"No MIPI Sequence found, parsing complete\n");
@ -2086,7 +2086,7 @@ parse_compression_parameters(struct drm_i915_private *i915)
if (i915->display.vbt.version < 198)
return;
params = find_section(i915, BDB_COMPRESSION_PARAMETERS);
params = bdb_find_section(i915, BDB_COMPRESSION_PARAMETERS);
if (params) {
/* Sanity checks */
if (params->entry_size != sizeof(params->data[0])) {
@ -2792,7 +2792,7 @@ parse_general_definitions(struct drm_i915_private *i915)
u16 block_size;
int bus_pin;
defs = find_section(i915, BDB_GENERAL_DEFINITIONS);
defs = bdb_find_section(i915, BDB_GENERAL_DEFINITIONS);
if (!defs) {
drm_dbg_kms(&i915->drm,
"No general definition block is found, no devices defined.\n");

View File

@ -46,6 +46,11 @@ struct intel_color_funcs {
* registers involved with the same commit.
*/
void (*color_commit_arm)(const struct intel_crtc_state *crtc_state);
/*
* Perform any extra tasks needed after all the
* double buffered registers have been latched.
*/
void (*color_post_update)(const struct intel_crtc_state *crtc_state);
/*
* Load LUTs (and other single buffered color management
* registers). Will (hopefully) be called during the vblank
@ -614,9 +619,33 @@ static void ilk_lut_12p4_pack(struct drm_color_lut *entry, u32 ldw, u32 udw)
static void icl_color_commit_noarm(const struct intel_crtc_state *crtc_state)
{
/*
* Despite Wa_1406463849, ICL no longer suffers from the SKL
* DC5/PSR CSC black screen issue (see skl_color_commit_noarm()).
* Possibly due to the extra sticky CSC arming
* (see icl_color_post_update()).
*
* On TGL+ all CSC arming issues have been properly fixed.
*/
icl_load_csc_matrix(crtc_state);
}
static void skl_color_commit_noarm(const struct intel_crtc_state *crtc_state)
{
/*
* Possibly related to display WA #1184, SKL CSC loses the latched
* CSC coeff/offset register values if the CSC registers are disarmed
* between DC5 exit and PSR exit. This will cause the plane(s) to
* output all black (until CSC_MODE is rearmed and properly latched).
* Once PSR exit (and proper register latching) has occurred the
* danger is over. Thus when PSR is enabled the CSC coeff/offset
* register programming will be performed from skl_color_commit_arm()
* which is called after PSR exit.
*/
if (!crtc_state->has_psr)
ilk_load_csc_matrix(crtc_state);
}
static void ilk_color_commit_noarm(const struct intel_crtc_state *crtc_state)
{
ilk_load_csc_matrix(crtc_state);
@ -659,6 +688,9 @@ static void skl_color_commit_arm(const struct intel_crtc_state *crtc_state)
enum pipe pipe = crtc->pipe;
u32 val = 0;
if (crtc_state->has_psr)
ilk_load_csc_matrix(crtc_state);
/*
* We don't (yet) allow userspace to control the pipe background color,
* so force it to black, but apply pipe gamma and CSC appropriately
@ -677,6 +709,47 @@ static void skl_color_commit_arm(const struct intel_crtc_state *crtc_state)
crtc_state->csc_mode);
}
static void icl_color_commit_arm(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
/*
* We don't (yet) allow userspace to control the pipe background color,
* so force it to black.
*/
intel_de_write(i915, SKL_BOTTOM_COLOR(pipe), 0);
intel_de_write(i915, GAMMA_MODE(crtc->pipe),
crtc_state->gamma_mode);
intel_de_write_fw(i915, PIPE_CSC_MODE(crtc->pipe),
crtc_state->csc_mode);
}
static void icl_color_post_update(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
/*
* Despite Wa_1406463849, ICL CSC is no longer disarmed by
* coeff/offset register *writes*. Instead, once CSC_MODE
* is armed it stays armed, even after it has been latched.
* Afterwards the coeff/offset registers become effectively
* self-arming. That self-arming must be disabled before the
* next icl_color_commit_noarm() tries to write the next set
* of coeff/offset registers. Fortunately register *reads*
* do still disarm the CSC. Naturally this must not be done
* until the previously written CSC registers have actually
* been latched.
*
* TGL+ no longer need this workaround.
*/
intel_de_read_fw(i915, PIPE_CSC_PREOFF_HI(crtc->pipe));
}
static struct drm_property_blob *
create_linear_lut(struct drm_i915_private *i915, int lut_size)
{
@ -1376,6 +1449,14 @@ void intel_color_commit_arm(const struct intel_crtc_state *crtc_state)
i915->display.funcs.color->color_commit_arm(crtc_state);
}
void intel_color_post_update(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
if (i915->display.funcs.color->color_post_update)
i915->display.funcs.color->color_post_update(crtc_state);
}
void intel_color_prepare_commit(struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@ -3072,10 +3153,20 @@ static const struct intel_color_funcs i9xx_color_funcs = {
.lut_equal = i9xx_lut_equal,
};
static const struct intel_color_funcs tgl_color_funcs = {
.color_check = icl_color_check,
.color_commit_noarm = icl_color_commit_noarm,
.color_commit_arm = icl_color_commit_arm,
.load_luts = icl_load_luts,
.read_luts = icl_read_luts,
.lut_equal = icl_lut_equal,
};
static const struct intel_color_funcs icl_color_funcs = {
.color_check = icl_color_check,
.color_commit_noarm = icl_color_commit_noarm,
.color_commit_arm = skl_color_commit_arm,
.color_commit_arm = icl_color_commit_arm,
.color_post_update = icl_color_post_update,
.load_luts = icl_load_luts,
.read_luts = icl_read_luts,
.lut_equal = icl_lut_equal,
@ -3083,7 +3174,7 @@ static const struct intel_color_funcs icl_color_funcs = {
static const struct intel_color_funcs glk_color_funcs = {
.color_check = glk_color_check,
.color_commit_noarm = ilk_color_commit_noarm,
.color_commit_noarm = skl_color_commit_noarm,
.color_commit_arm = skl_color_commit_arm,
.load_luts = glk_load_luts,
.read_luts = glk_read_luts,
@ -3092,7 +3183,7 @@ static const struct intel_color_funcs glk_color_funcs = {
static const struct intel_color_funcs skl_color_funcs = {
.color_check = ivb_color_check,
.color_commit_noarm = ilk_color_commit_noarm,
.color_commit_noarm = skl_color_commit_noarm,
.color_commit_arm = skl_color_commit_arm,
.load_luts = bdw_load_luts,
.read_luts = bdw_read_luts,
@ -3188,7 +3279,9 @@ void intel_color_init_hooks(struct drm_i915_private *i915)
else
i915->display.funcs.color = &i9xx_color_funcs;
} else {
if (DISPLAY_VER(i915) >= 11)
if (DISPLAY_VER(i915) >= 12)
i915->display.funcs.color = &tgl_color_funcs;
else if (DISPLAY_VER(i915) == 11)
i915->display.funcs.color = &icl_color_funcs;
else if (DISPLAY_VER(i915) == 10)
i915->display.funcs.color = &glk_color_funcs;

View File

@ -21,6 +21,7 @@ void intel_color_prepare_commit(struct intel_crtc_state *crtc_state);
void intel_color_cleanup_commit(struct intel_crtc_state *crtc_state);
void intel_color_commit_noarm(const struct intel_crtc_state *crtc_state);
void intel_color_commit_arm(const struct intel_crtc_state *crtc_state);
void intel_color_post_update(const struct intel_crtc_state *crtc_state);
void intel_color_load_luts(const struct intel_crtc_state *crtc_state);
void intel_color_get_config(struct intel_crtc_state *crtc_state);
bool intel_color_lut_equal(const struct intel_crtc_state *crtc_state,

View File

@ -212,7 +212,7 @@ static void intel_crtc_destroy(struct drm_crtc *_crtc)
static int intel_crtc_late_register(struct drm_crtc *crtc)
{
intel_crtc_debugfs_add(crtc);
intel_crtc_debugfs_add(to_intel_crtc(crtc));
return 0;
}
@ -686,6 +686,14 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
*/
intel_vrr_send_push(new_crtc_state);
/*
* Seamless M/N update may need to update frame timings.
*
* FIXME Should be synchronized with the start of vblank somehow...
*/
if (new_crtc_state->seamless_m_n && intel_crtc_needs_fastset(new_crtc_state))
intel_crtc_update_active_timings(new_crtc_state);
local_irq_enable();
if (intel_vgpu_active(dev_priv))

View File

@ -21,7 +21,6 @@
#include "intel_fb_pin.h"
#include "intel_frontbuffer.h"
#include "intel_psr.h"
#include "intel_sprite.h"
#include "skl_watermark.h"
/* Cursor formats */

View File

@ -65,7 +65,6 @@
#include "intel_psr.h"
#include "intel_quirks.h"
#include "intel_snps_phy.h"
#include "intel_sprite.h"
#include "intel_tc.h"
#include "intel_vdsc.h"
#include "intel_vdsc_regs.h"
@ -2520,6 +2519,10 @@ static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
if (HAS_DP20(dev_priv))
intel_dp_128b132b_sdp_crc16(enc_to_intel_dp(encoder),
crtc_state);
if (DISPLAY_VER(dev_priv) >= 12)
tgl_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state);
else
@ -2618,8 +2621,7 @@ static void intel_disable_ddi_buf(struct intel_encoder *encoder,
if (intel_crtc_has_dp_encoder(crtc_state))
intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK,
DP_TP_CTL_LINK_TRAIN_PAT1);
DP_TP_CTL_ENABLE, 0);
/* Disable FEC in DP Sink */
intel_ddi_disable_fec_state(encoder, crtc_state);
@ -3140,8 +3142,7 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
wait = true;
}
dp_tp_ctl &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
dp_tp_ctl |= DP_TP_CTL_LINK_TRAIN_PAT1;
dp_tp_ctl &= ~DP_TP_CTL_ENABLE;
intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), dp_tp_ctl);
intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
@ -3543,6 +3544,37 @@ static void icl_ddi_combo_get_config(struct intel_encoder *encoder,
intel_ddi_get_config(encoder, crtc_state);
}
static bool icl_ddi_tc_pll_is_tbt(const struct intel_shared_dpll *pll)
{
return pll->info->id == DPLL_ID_ICL_TBTPLL;
}
static enum icl_port_dpll_id
icl_ddi_tc_port_pll_type(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
if (drm_WARN_ON(&i915->drm, !pll))
return ICL_PORT_DPLL_DEFAULT;
if (icl_ddi_tc_pll_is_tbt(pll))
return ICL_PORT_DPLL_DEFAULT;
else
return ICL_PORT_DPLL_MG_PHY;
}
enum icl_port_dpll_id
intel_ddi_port_pll_type(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
if (!encoder->port_pll_type)
return ICL_PORT_DPLL_DEFAULT;
return encoder->port_pll_type(encoder, crtc_state);
}
static void icl_ddi_tc_get_clock(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct intel_shared_dpll *pll)
@ -3555,7 +3587,7 @@ static void icl_ddi_tc_get_clock(struct intel_encoder *encoder,
if (drm_WARN_ON(&i915->drm, !pll))
return;
if (pll->info->id == DPLL_ID_ICL_TBTPLL)
if (icl_ddi_tc_pll_is_tbt(pll))
port_dpll_id = ICL_PORT_DPLL_DEFAULT;
else
port_dpll_id = ICL_PORT_DPLL_MG_PHY;
@ -3568,7 +3600,7 @@ static void icl_ddi_tc_get_clock(struct intel_encoder *encoder,
icl_set_active_port_dpll(crtc_state, port_dpll_id);
if (crtc_state->shared_dpll->info->id == DPLL_ID_ICL_TBTPLL)
if (icl_ddi_tc_pll_is_tbt(crtc_state->shared_dpll))
crtc_state->port_clock = icl_calc_tbt_pll_link(i915, encoder->port);
else
crtc_state->port_clock = intel_dpll_get_freq(i915, crtc_state->shared_dpll,
@ -3610,7 +3642,8 @@ static void intel_ddi_sync_state(struct intel_encoder *encoder,
enum phy phy = intel_port_to_phy(i915, encoder->port);
if (intel_phy_is_tc(i915, phy))
intel_tc_port_sanitize_mode(enc_to_dig_port(encoder));
intel_tc_port_sanitize_mode(enc_to_dig_port(encoder),
crtc_state);
if (crtc_state && intel_crtc_has_dp_encoder(crtc_state))
intel_dp_sync_state(encoder, crtc_state);
@ -4404,6 +4437,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
encoder->enable_clock = jsl_ddi_tc_enable_clock;
encoder->disable_clock = jsl_ddi_tc_disable_clock;
encoder->is_clock_enabled = jsl_ddi_tc_is_clock_enabled;
encoder->port_pll_type = icl_ddi_tc_port_pll_type;
encoder->get_config = icl_ddi_combo_get_config;
} else {
encoder->enable_clock = icl_ddi_combo_enable_clock;
@ -4416,6 +4450,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
encoder->enable_clock = icl_ddi_tc_enable_clock;
encoder->disable_clock = icl_ddi_tc_disable_clock;
encoder->is_clock_enabled = icl_ddi_tc_is_clock_enabled;
encoder->port_pll_type = icl_ddi_tc_port_pll_type;
encoder->get_config = icl_ddi_tc_get_config;
} else {
encoder->enable_clock = icl_ddi_combo_enable_clock;
@ -4496,6 +4531,16 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
!intel_bios_encoder_supports_typec_usb(devdata) &&
!intel_bios_encoder_supports_tbt(devdata);
if (!is_legacy && init_hdmi) {
is_legacy = !init_dp;
drm_dbg_kms(&dev_priv->drm,
"VBT says port %c is non-legacy TC and has HDMI (with DP: %s), assume it's %s\n",
port_name(port),
str_yes_no(init_dp),
is_legacy ? "legacy" : "non-legacy");
}
intel_tc_port_init(dig_port, is_legacy);
encoder->update_prepare = intel_ddi_update_prepare;

View File

@ -40,6 +40,9 @@ void hsw_ddi_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void hsw_ddi_disable_clock(struct intel_encoder *encoder);
bool hsw_ddi_is_clock_enabled(struct intel_encoder *encoder);
enum icl_port_dpll_id
intel_ddi_port_pll_type(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void hsw_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state);
struct intel_shared_dpll *icl_ddi_combo_get_pll(struct intel_encoder *encoder);

View File

@ -111,7 +111,6 @@
#include "intel_quirks.h"
#include "intel_sdvo.h"
#include "intel_snps_phy.h"
#include "intel_sprite.h"
#include "intel_tc.h"
#include "intel_tv.h"
#include "intel_vblank.h"
@ -131,7 +130,7 @@
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
static void hsw_set_transconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipe_misc(const struct intel_crtc_state *crtc_state);
static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
/* returns HPLL frequency in kHz */
@ -1116,6 +1115,9 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
if (needs_cursorclk_wa(old_crtc_state) &&
!needs_cursorclk_wa(new_crtc_state))
icl_wa_cursorclkgating(dev_priv, pipe, false);
if (intel_crtc_needs_color_update(new_crtc_state))
intel_color_post_update(new_crtc_state);
}
static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
@ -1793,7 +1795,7 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
intel_set_pipe_src_size(new_crtc_state);
if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
bdw_set_pipemisc(new_crtc_state);
bdw_set_pipe_misc(new_crtc_state);
if (!intel_crtc_is_bigjoiner_slave(new_crtc_state) &&
!transcoder_is_dsi(cpu_transcoder))
@ -2139,6 +2141,8 @@ static void valleyview_crtc_enable(struct intel_atomic_state *state,
intel_set_pipe_src_size(new_crtc_state);
intel_de_write(dev_priv, VLV_PIPE_MSA_MISC(pipe), 0);
if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY);
intel_de_write(dev_priv, CHV_CANVAS(pipe), 0);
@ -3074,20 +3078,20 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
}
static enum intel_output_format
bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
bdw_get_pipe_misc_output_format(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 tmp;
tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
tmp = intel_de_read(dev_priv, PIPE_MISC(crtc->pipe));
if (tmp & PIPEMISC_YUV420_ENABLE) {
if (tmp & PIPE_MISC_YUV420_ENABLE) {
/* We support 4:2:0 in full blend mode only */
drm_WARN_ON(&dev_priv->drm,
(tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
(tmp & PIPE_MISC_YUV420_MODE_FULL_BLEND) == 0);
return INTEL_OUTPUT_FORMAT_YCBCR420;
} else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
} else if (tmp & PIPE_MISC_OUTPUT_COLORSPACE_YUV) {
return INTEL_OUTPUT_FORMAT_YCBCR444;
} else {
return INTEL_OUTPUT_FORMAT_RGB;
@ -3330,7 +3334,7 @@ static void hsw_set_transconf(const struct intel_crtc_state *crtc_state)
intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder));
}
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
static void bdw_set_pipe_misc(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@ -3338,18 +3342,18 @@ static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
switch (crtc_state->pipe_bpp) {
case 18:
val |= PIPEMISC_BPC_6;
val |= PIPE_MISC_BPC_6;
break;
case 24:
val |= PIPEMISC_BPC_8;
val |= PIPE_MISC_BPC_8;
break;
case 30:
val |= PIPEMISC_BPC_10;
val |= PIPE_MISC_BPC_10;
break;
case 36:
/* Port output 12BPC defined for ADLP+ */
if (DISPLAY_VER(dev_priv) > 12)
val |= PIPEMISC_BPC_12_ADLP;
val |= PIPE_MISC_BPC_12_ADLP;
break;
default:
MISSING_CASE(crtc_state->pipe_bpp);
@ -3357,38 +3361,38 @@ static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
}
if (crtc_state->dither)
val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
val |= PIPE_MISC_DITHER_ENABLE | PIPE_MISC_DITHER_TYPE_SP;
if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
val |= PIPE_MISC_OUTPUT_COLORSPACE_YUV;
if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
val |= PIPEMISC_YUV420_ENABLE |
PIPEMISC_YUV420_MODE_FULL_BLEND;
val |= PIPE_MISC_YUV420_ENABLE |
PIPE_MISC_YUV420_MODE_FULL_BLEND;
if (DISPLAY_VER(dev_priv) >= 11 && is_hdr_mode(crtc_state))
val |= PIPEMISC_HDR_MODE_PRECISION;
val |= PIPE_MISC_HDR_MODE_PRECISION;
if (DISPLAY_VER(dev_priv) >= 12)
val |= PIPEMISC_PIXEL_ROUNDING_TRUNC;
val |= PIPE_MISC_PIXEL_ROUNDING_TRUNC;
intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val);
intel_de_write(dev_priv, PIPE_MISC(crtc->pipe), val);
}
int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
int bdw_get_pipe_misc_bpp(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 tmp;
tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
tmp = intel_de_read(dev_priv, PIPE_MISC(crtc->pipe));
switch (tmp & PIPEMISC_BPC_MASK) {
case PIPEMISC_BPC_6:
switch (tmp & PIPE_MISC_BPC_MASK) {
case PIPE_MISC_BPC_6:
return 18;
case PIPEMISC_BPC_8:
case PIPE_MISC_BPC_8:
return 24;
case PIPEMISC_BPC_10:
case PIPE_MISC_BPC_10:
return 30;
/*
* PORT OUTPUT 12 BPC defined for ADLP+.
@ -3400,7 +3404,7 @@ int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
* on older platforms, need to find a workaround for 12 BPC
* MIPI DSI HW readout.
*/
case PIPEMISC_BPC_12_ADLP:
case PIPE_MISC_BPC_12_ADLP:
if (DISPLAY_VER(dev_priv) > 12)
return 36;
fallthrough;
@ -3981,7 +3985,7 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
} else {
pipe_config->output_format =
bdw_get_pipemisc_output_format(crtc);
bdw_get_pipe_misc_output_format(crtc);
}
pipe_config->gamma_mode = intel_de_read(dev_priv,
@ -5079,6 +5083,7 @@ intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
* only fields that are known to not cause problems are preserved. */
saved_state->uapi = crtc_state->uapi;
saved_state->inherited = crtc_state->inherited;
saved_state->scaler_state = crtc_state->scaler_state;
saved_state->shared_dpll = crtc_state->shared_dpll;
saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
@ -5904,68 +5909,6 @@ int intel_modeset_all_pipes(struct intel_atomic_state *state,
return 0;
}
void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct drm_display_mode adjusted_mode;
drm_mode_init(&adjusted_mode, &crtc_state->hw.adjusted_mode);
if (crtc_state->vrr.enable) {
adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax;
adjusted_mode.crtc_vblank_end = crtc_state->vrr.vmax;
adjusted_mode.crtc_vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
crtc->vmax_vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
}
drm_calc_timestamping_constants(&crtc->base, &adjusted_mode);
crtc->mode_flags = crtc_state->mode_flags;
/*
* The scanline counter increments at the leading edge of hsync.
*
* On most platforms it starts counting from vtotal-1 on the
* first active line. That means the scanline counter value is
* always one less than what we would expect. Ie. just after
* start of vblank, which also occurs at start of hsync (on the
* last active line), the scanline counter will read vblank_start-1.
*
* On gen2 the scanline counter starts counting from 1 instead
* of vtotal-1, so we have to subtract one (or rather add vtotal-1
* to keep the value positive), instead of adding one.
*
* On HSW+ the behaviour of the scanline counter depends on the output
* type. For DP ports it behaves like most other platforms, but on HDMI
* there's an extra 1 line difference. So we need to add two instead of
* one to the value.
*
* On VLV/CHV DSI the scanline counter would appear to increment
* approx. 1/3 of a scanline before start of vblank. Unfortunately
* that means we can't tell whether we're in vblank or not while
* we're on that particular line. We must still set scanline_offset
* to 1 so that the vblank timestamps come out correct when we query
* the scanline counter from within the vblank interrupt handler.
* However if queried just before the start of vblank we'll get an
* answer that's slightly in the future.
*/
if (DISPLAY_VER(dev_priv) == 2) {
int vtotal;
vtotal = adjusted_mode.crtc_vtotal;
if (adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
vtotal /= 2;
crtc->scanline_offset = vtotal - 1;
} else if (HAS_DDI(dev_priv) &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
crtc->scanline_offset = 2;
} else {
crtc->scanline_offset = 1;
}
}
/*
* This implements the workaround described in the "notes" section of the mode
* set sequence documentation. When going from no pipes or single pipe to
@ -6970,7 +6913,7 @@ static void commit_pipe_pre_planes(struct intel_atomic_state *state,
intel_color_commit_arm(new_crtc_state);
if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
bdw_set_pipemisc(new_crtc_state);
bdw_set_pipe_misc(new_crtc_state);
if (intel_crtc_needs_fastset(new_crtc_state))
intel_pipe_fastset(old_crtc_state, new_crtc_state);
@ -7046,6 +6989,8 @@ static void intel_update_crtc(struct intel_atomic_state *state,
intel_fbc_update(state, crtc);
drm_WARN_ON(&i915->drm, !intel_display_power_is_enabled(i915, POWER_DOMAIN_DC_OFF));
if (!modeset &&
intel_crtc_needs_color_update(new_crtc_state))
intel_color_commit_noarm(new_crtc_state);
@ -7413,8 +7358,28 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
drm_atomic_helper_wait_for_dependencies(&state->base);
drm_dp_mst_atomic_wait_for_dependencies(&state->base);
if (state->modeset)
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
/*
* During full modesets we write a lot of registers, wait
* for PLLs, etc. Doing that while DC states are enabled
* is not a good idea.
*
* During fastsets and other updates we also need to
* disable DC states due to the following scenario:
* 1. DC5 exit and PSR exit happen
* 2. Some or all _noarm() registers are written
* 3. Due to some long delay PSR is re-entered
* 4. DC5 entry -> DMC saves the already written new
* _noarm() registers and the old not yet written
* _arm() registers
* 5. DC5 exit -> DMC restores a mixture of old and
* new register values and arms the update
* 6. PSR exit -> hardware latches a mixture of old and
* new register values -> corrupted frame, or worse
* 7. New _arm() registers are finally written
* 8. Hardware finally latches a complete set of new
* register values, and subsequent frames will be OK again
*/
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DC_OFF);
intel_atomic_prepare_plane_clear_colors(state);
@ -7563,8 +7528,8 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
* the culprit.
*/
intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
}
intel_display_power_put(dev_priv, POWER_DOMAIN_DC_OFF, wakeref);
intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
/*
@ -8886,14 +8851,14 @@ void intel_display_driver_register(struct drm_i915_private *i915)
if (!HAS_DISPLAY(i915))
return;
intel_display_debugfs_register(i915);
/* Must be done after probing outputs */
intel_opregion_register(i915);
intel_acpi_video_register(i915);
intel_audio_init(i915);
intel_display_debugfs_register(i915);
/*
* Some ports require correctly set-up hpd registers for
* detection to work properly (leading to ghost connected

View File

@ -422,7 +422,6 @@ bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state);
bool intel_pipe_config_compare(const struct intel_crtc_state *current_config,
const struct intel_crtc_state *pipe_config,
bool fastset);
void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state);
void intel_plane_destroy(struct drm_plane *plane);
void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
@ -511,7 +510,7 @@ void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state);
void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state);
int bdw_get_pipemisc_bpp(struct intel_crtc *crtc);
int bdw_get_pipe_misc_bpp(struct intel_crtc *crtc);
unsigned int intel_plane_fence_y_offset(const struct intel_plane_state *plane_state);
bool intel_plane_uses_fence(const struct intel_plane_state *plane_state);

View File

@ -183,6 +183,17 @@ struct intel_hotplug {
* blocked behind the non-DP one.
*/
struct workqueue_struct *dp_wq;
/*
* Flag to track whether long HPDs should be ignored
*
* Some panels generate long HPDs while staying connected to the port.
* This can cause issues with CI test results. In CI systems we
* don't expect to disconnect the panels and can ignore the long
* HPDs generated by the faulty panels. This flag can be used as a
* cue to ignore the long HPDs and can be set / unset via debugfs.
*/
bool ignore_long_hpd;
};
struct intel_vbt_data {
@ -384,9 +395,15 @@ struct intel_display {
} gmbus;
struct {
struct i915_hdcp_comp_master *master;
struct i915_hdcp_master *master;
bool comp_added;
/*
* HDCP message struct used to allocate memory that can be
* reused when sending messages to the GSC CS.
* This is only populated from Meteor Lake onwards.
*/
struct intel_hdcp_gsc_message *hdcp_message;
/* Mutex to protect the above hdcp component related values. */
struct mutex comp_mutex;
} hdcp;

View File

@ -8,6 +8,7 @@
#include <drm/drm_debugfs.h>
#include <drm/drm_fourcc.h>
#include "hsw_ips.h"
#include "i915_debugfs.h"
#include "i915_irq.h"
#include "i915_reg.h"
@ -48,33 +49,6 @@ static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
return 0;
}
static int i915_ips_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
intel_wakeref_t wakeref;
if (!HAS_IPS(dev_priv))
return -ENODEV;
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
seq_printf(m, "Enabled by kernel parameter: %s\n",
str_yes_no(dev_priv->params.enable_ips));
if (DISPLAY_VER(dev_priv) >= 8) {
seq_puts(m, "Currently: unknown\n");
} else {
if (intel_de_read(dev_priv, IPS_CTL) & IPS_ENABLE)
seq_puts(m, "Currently: enabled\n");
else
seq_puts(m, "Currently: disabled\n");
}
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return 0;
}
static int i915_sr_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@ -168,269 +142,6 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
return 0;
}
static int i915_psr_sink_status_show(struct seq_file *m, void *data)
{
u8 val;
static const char * const sink_status[] = {
"inactive",
"transition to active, capture and display",
"active, display from RFB",
"active, capture and display on sink device timings",
"transition to inactive, capture and display, timing re-sync",
"reserved",
"reserved",
"sink internal error",
};
struct drm_connector *connector = m->private;
struct intel_dp *intel_dp =
intel_attached_dp(to_intel_connector(connector));
int ret;
if (!CAN_PSR(intel_dp)) {
seq_puts(m, "PSR Unsupported\n");
return -ENODEV;
}
if (connector->status != connector_status_connected)
return -ENODEV;
ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
if (ret == 1) {
const char *str = "unknown";
val &= DP_PSR_SINK_STATE_MASK;
if (val < ARRAY_SIZE(sink_status))
str = sink_status[val];
seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
} else {
return ret;
}
return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
static void
psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
const char *status = "unknown";
u32 val, status_val;
if (intel_dp->psr.psr2_enabled) {
static const char * const live_status[] = {
"IDLE",
"CAPTURE",
"CAPTURE_FS",
"SLEEP",
"BUFON_FW",
"ML_UP",
"SU_STANDBY",
"FAST_SLEEP",
"DEEP_SLEEP",
"BUF_ON",
"TG_ON"
};
val = intel_de_read(dev_priv,
EDP_PSR2_STATUS(intel_dp->psr.transcoder));
status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
if (status_val < ARRAY_SIZE(live_status))
status = live_status[status_val];
} else {
static const char * const live_status[] = {
"IDLE",
"SRDONACK",
"SRDENT",
"BUFOFF",
"BUFON",
"AUXACK",
"SRDOFFACK",
"SRDENT_ON",
};
val = intel_de_read(dev_priv,
EDP_PSR_STATUS(intel_dp->psr.transcoder));
status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
EDP_PSR_STATUS_STATE_SHIFT;
if (status_val < ARRAY_SIZE(live_status))
status = live_status[status_val];
}
seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
}
static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_psr *psr = &intel_dp->psr;
intel_wakeref_t wakeref;
const char *status;
bool enabled;
u32 val;
seq_printf(m, "Sink support: %s", str_yes_no(psr->sink_support));
if (psr->sink_support)
seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
seq_puts(m, "\n");
if (!psr->sink_support)
return 0;
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
mutex_lock(&psr->lock);
if (psr->enabled)
status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
else
status = "disabled";
seq_printf(m, "PSR mode: %s\n", status);
if (!psr->enabled) {
seq_printf(m, "PSR sink not reliable: %s\n",
str_yes_no(psr->sink_not_reliable));
goto unlock;
}
if (psr->psr2_enabled) {
val = intel_de_read(dev_priv,
EDP_PSR2_CTL(intel_dp->psr.transcoder));
enabled = val & EDP_PSR2_ENABLE;
} else {
val = intel_de_read(dev_priv,
EDP_PSR_CTL(intel_dp->psr.transcoder));
enabled = val & EDP_PSR_ENABLE;
}
seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
str_enabled_disabled(enabled), val);
psr_source_status(intel_dp, m);
seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
psr->busy_frontbuffer_bits);
/*
* SKL+ Perf counter is reset to 0 every time DC state is entered
*/
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
val = intel_de_read(dev_priv,
EDP_PSR_PERF_CNT(intel_dp->psr.transcoder));
val &= EDP_PSR_PERF_CNT_MASK;
seq_printf(m, "Performance counter: %u\n", val);
}
if (psr->debug & I915_PSR_DEBUG_IRQ) {
seq_printf(m, "Last attempted entry at: %lld\n",
psr->last_entry_attempt);
seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
}
if (psr->psr2_enabled) {
u32 su_frames_val[3];
int frame;
/*
* Reading all 3 registers beforehand to minimize crossing a
* frame boundary between register reads
*/
for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
val = intel_de_read(dev_priv,
PSR2_SU_STATUS(intel_dp->psr.transcoder, frame));
su_frames_val[frame / 3] = val;
}
seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
u32 su_blocks;
su_blocks = su_frames_val[frame / 3] &
PSR2_SU_STATUS_MASK(frame);
su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
seq_printf(m, "%d\t%d\n", frame, su_blocks);
}
seq_printf(m, "PSR2 selective fetch: %s\n",
str_enabled_disabled(psr->psr2_sel_fetch_enabled));
}
unlock:
mutex_unlock(&psr->lock);
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return 0;
}
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_dp *intel_dp = NULL;
struct intel_encoder *encoder;
if (!HAS_PSR(dev_priv))
return -ENODEV;
/* Find the first EDP which supports PSR */
for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
intel_dp = enc_to_intel_dp(encoder);
break;
}
if (!intel_dp)
return -ENODEV;
return intel_psr_status(m, intel_dp);
}
static int
i915_edp_psr_debug_set(void *data, u64 val)
{
struct drm_i915_private *dev_priv = data;
struct intel_encoder *encoder;
intel_wakeref_t wakeref;
int ret = -ENODEV;
if (!HAS_PSR(dev_priv))
return ret;
for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
// TODO: split to each transcoder's PSR debug state
ret = intel_psr_debug_set(intel_dp, val);
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
}
return ret;
}
static int
i915_edp_psr_debug_get(void *data, u64 *val)
{
struct drm_i915_private *dev_priv = data;
struct intel_encoder *encoder;
if (!HAS_PSR(dev_priv))
return -ENODEV;
for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
// TODO: split to each transcoder's PSR debug state
*val = READ_ONCE(intel_dp->psr.debug);
return 0;
}
return -ENODEV;
}
DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
i915_edp_psr_debug_get, i915_edp_psr_debug_set,
"%llu\n");
static int i915_power_domain_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
@ -831,10 +542,10 @@ static const struct file_operations crtc_updates_fops = {
.write = crtc_updates_write
};
static void crtc_updates_add(struct drm_crtc *crtc)
static void crtc_updates_add(struct intel_crtc *crtc)
{
debugfs_create_file("i915_update_info", 0644, crtc->debugfs_entry,
to_intel_crtc(crtc), &crtc_updates_fops);
debugfs_create_file("i915_update_info", 0644, crtc->base.debugfs_entry,
crtc, &crtc_updates_fops);
}
#else
@ -844,7 +555,7 @@ static void crtc_updates_info(struct seq_file *m,
{
}
static void crtc_updates_add(struct drm_crtc *crtc)
static void crtc_updates_add(struct intel_crtc *crtc)
{
}
#endif
@ -1342,12 +1053,10 @@ static const struct file_operations i915_fifo_underrun_reset_ops = {
static const struct drm_info_list intel_display_debugfs_list[] = {
{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
{"i915_ips_status", i915_ips_status, 0},
{"i915_sr_status", i915_sr_status, 0},
{"i915_opregion", i915_opregion, 0},
{"i915_vbt", i915_vbt, 0},
{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
{"i915_edp_psr_status", i915_edp_psr_status, 0},
{"i915_power_domain_info", i915_power_domain_info, 0},
{"i915_display_info", i915_display_info, 0},
{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
@ -1364,7 +1073,6 @@ static const struct {
{"i915_dp_test_data", &i915_displayport_test_data_fops},
{"i915_dp_test_type", &i915_displayport_test_type_fops},
{"i915_dp_test_active", &i915_displayport_test_active_fops},
{"i915_edp_psr_debug", &i915_edp_psr_debug_fops},
};
void intel_display_debugfs_register(struct drm_i915_private *i915)
@ -1384,9 +1092,11 @@ void intel_display_debugfs_register(struct drm_i915_private *i915)
ARRAY_SIZE(intel_display_debugfs_list),
minor->debugfs_root, minor);
hsw_ips_debugfs_register(i915);
intel_dmc_debugfs_register(i915);
intel_fbc_debugfs_register(i915);
intel_hpd_debugfs_register(i915);
intel_psr_debugfs_register(i915);
intel_wm_debugfs_register(i915);
}
@ -1439,16 +1149,6 @@ static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
}
DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
static int i915_psr_status_show(struct seq_file *m, void *data)
{
struct drm_connector *connector = m->private;
struct intel_dp *intel_dp =
intel_attached_dp(to_intel_connector(connector));
return intel_psr_status(m, intel_dp);
}
DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
static int i915_lpsp_capability_show(struct seq_file *m, void *data)
{
struct drm_connector *connector = m->private;
@ -1666,7 +1366,7 @@ static const struct file_operations i915_dsc_bpc_fops = {
*/
static int i915_current_bpc_show(struct seq_file *m, void *data)
{
struct intel_crtc *crtc = to_intel_crtc(m->private);
struct intel_crtc *crtc = m->private;
struct intel_crtc_state *crtc_state;
int ret;
@ -1683,6 +1383,17 @@ static int i915_current_bpc_show(struct seq_file *m, void *data)
}
DEFINE_SHOW_ATTRIBUTE(i915_current_bpc);
/* Pipe may differ from crtc index if pipes are fused off */
static int intel_crtc_pipe_show(struct seq_file *m, void *unused)
{
struct intel_crtc *crtc = m->private;
seq_printf(m, "%c\n", pipe_name(crtc->pipe));
return 0;
}
DEFINE_SHOW_ATTRIBUTE(intel_crtc_pipe);
/**
* intel_connector_debugfs_add - add i915 specific connector debugfs files
* @connector: pointer to a registered drm_connector
@ -1701,19 +1412,11 @@ void intel_connector_debugfs_add(struct intel_connector *intel_connector)
return;
intel_drrs_connector_debugfs_add(intel_connector);
intel_psr_connector_debugfs_add(intel_connector);
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
debugfs_create_file("i915_panel_timings", S_IRUGO, root,
connector, &i915_panel_fops);
debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
connector, &i915_psr_sink_status_fops);
}
if (HAS_PSR(dev_priv) &&
connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
debugfs_create_file("i915_psr_status", 0444, root,
connector, &i915_psr_status_fops);
}
if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
@ -1748,15 +1451,19 @@ void intel_connector_debugfs_add(struct intel_connector *intel_connector)
*
* Failure to add debugfs entries should generally be ignored.
*/
void intel_crtc_debugfs_add(struct drm_crtc *crtc)
void intel_crtc_debugfs_add(struct intel_crtc *crtc)
{
if (!crtc->debugfs_entry)
struct dentry *root = crtc->base.debugfs_entry;
if (!root)
return;
crtc_updates_add(crtc);
intel_drrs_crtc_debugfs_add(to_intel_crtc(crtc));
intel_fbc_crtc_debugfs_add(to_intel_crtc(crtc));
intel_drrs_crtc_debugfs_add(crtc);
intel_fbc_crtc_debugfs_add(crtc);
debugfs_create_file("i915_current_bpc", 0444, crtc->debugfs_entry, crtc,
debugfs_create_file("i915_current_bpc", 0444, root, crtc,
&i915_current_bpc_fops);
debugfs_create_file("i915_pipe", 0444, root, crtc,
&intel_crtc_pipe_fops);
}
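The new i915_pipe entry complements i915_current_bpc: reading it returns the single letter of the hardware pipe backing this crtc (e.g. "A"), which, per the comment above intel_crtc_pipe_show(), can differ from the crtc index when pipes are fused off.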

View File

@ -6,18 +6,18 @@
#ifndef __INTEL_DISPLAY_DEBUGFS_H__
#define __INTEL_DISPLAY_DEBUGFS_H__
struct drm_crtc;
struct drm_i915_private;
struct intel_connector;
struct intel_crtc;
#ifdef CONFIG_DEBUG_FS
void intel_display_debugfs_register(struct drm_i915_private *i915);
void intel_connector_debugfs_add(struct intel_connector *connector);
void intel_crtc_debugfs_add(struct drm_crtc *crtc);
void intel_crtc_debugfs_add(struct intel_crtc *crtc);
#else
static inline void intel_display_debugfs_register(struct drm_i915_private *i915) {}
static inline void intel_connector_debugfs_add(struct intel_connector *connector) {}
static inline void intel_crtc_debugfs_add(struct drm_crtc *crtc) {}
static inline void intel_crtc_debugfs_add(struct intel_crtc *crtc) {}
#endif
#endif /* __INTEL_DISPLAY_DEBUGFS_H__ */

View File

@ -1625,6 +1625,10 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
intel_power_well_enable(dev_priv, well);
mutex_unlock(&power_domains->lock);
if (DISPLAY_VER(dev_priv) == 14)
intel_de_rmw(dev_priv, DC_STATE_EN,
HOLD_PHY_PG1_LATCH | HOLD_PHY_CLKREQ_PG1_LATCH, 0);
/* 4. Enable CDCLK. */
intel_cdclk_init_hw(dev_priv);
@ -1678,6 +1682,10 @@ static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
/* 3. Disable CD clock */
intel_cdclk_uninit_hw(dev_priv);
if (DISPLAY_VER(dev_priv) == 14)
intel_de_rmw(dev_priv, DC_STATE_EN, 0,
HOLD_PHY_PG1_LATCH | HOLD_PHY_CLKREQ_PG1_LATCH);
/*
* 4. Disable Power Well 1 (PG1).
* The AUX IO power wells are toggled on demand, so they are already

View File

@ -43,7 +43,7 @@
#include <drm/drm_rect.h>
#include <drm/drm_vblank.h>
#include <drm/drm_vblank_work.h>
#include <drm/i915_mei_hdcp_interface.h>
#include <drm/i915_hdcp_interface.h>
#include <media/cec-notifier.h>
#include "i915_vma.h"
@ -255,6 +255,11 @@ struct intel_encoder {
* Returns whether the port clock is enabled or not.
*/
bool (*is_clock_enabled)(struct intel_encoder *encoder);
/*
* Returns the PLL type the port uses.
*/
enum icl_port_dpll_id (*port_pll_type)(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
const struct intel_ddi_buf_trans *(*get_buf_trans)(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries);
@ -1783,6 +1788,7 @@ struct intel_digital_port {
bool tc_legacy_port:1;
char tc_port_name[8];
enum tc_port_mode tc_mode;
enum tc_port_mode tc_init_mode;
enum phy_fia tc_phy_fia;
u8 tc_phy_fia_idx;

View File

@ -89,10 +89,13 @@ static struct intel_dmc *i915_to_dmc(struct drm_i915_private *i915)
__stringify(major) "_" \
__stringify(minor) ".bin"
#define XELPDP_DMC_MAX_FW_SIZE 0x7000
#define DISPLAY_VER13_DMC_MAX_FW_SIZE 0x20000
#define DISPLAY_VER12_DMC_MAX_FW_SIZE ICL_DMC_MAX_FW_SIZE
#define MTL_DMC_PATH DMC_PATH(mtl)
MODULE_FIRMWARE(MTL_DMC_PATH);
#define DG2_DMC_PATH DMC_LEGACY_PATH(dg2, 2, 08)
MODULE_FIRMWARE(DG2_DMC_PATH);
@ -424,15 +427,12 @@ static void disable_all_event_handlers(struct drm_i915_private *i915)
}
}
static void pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable)
static void adlp_pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable)
{
enum pipe pipe;
if (DISPLAY_VER(i915) < 13)
return;
/*
* Wa_16015201720:adl-p,dg2, mtl
* Wa_16015201720:adl-p,dg2
* The WA requires clock gating to be disabled all the time
* for pipe A and B.
* For pipe C and D clock gating needs to be disabled only
@ -448,6 +448,25 @@ static void pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable)
PIPEDMC_GATING_DIS, 0);
}
static void mtl_pipedmc_clock_gating_wa(struct drm_i915_private *i915)
{
/*
* Wa_16015201720
* The WA requires clock gating to be disabled all the time
* for pipe A and B.
*/
intel_de_rmw(i915, GEN9_CLKGATE_DIS_0, 0,
MTL_PIPEDMC_GATING_DIS_A | MTL_PIPEDMC_GATING_DIS_B);
}
static void pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable)
{
if (DISPLAY_VER(i915) >= 14 && enable)
mtl_pipedmc_clock_gating_wa(i915);
else if (DISPLAY_VER(i915) == 13)
adlp_pipedmc_clock_gating_wa(i915, enable);
}
void intel_dmc_enable_pipe(struct drm_i915_private *i915, enum pipe pipe)
{
enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe);
@ -979,7 +998,10 @@ void intel_dmc_init(struct drm_i915_private *i915)
INIT_WORK(&dmc->work, dmc_load_work_fn);
if (IS_DG2(i915)) {
if (IS_METEORLAKE(i915)) {
dmc->fw_path = MTL_DMC_PATH;
dmc->max_fw_size = XELPDP_DMC_MAX_FW_SIZE;
} else if (IS_DG2(i915)) {
dmc->fw_path = DG2_DMC_PATH;
dmc->max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE;
} else if (IS_ALDERLAKE_P(i915)) {

View File

@ -687,6 +687,12 @@ u32 intel_dp_dsc_nearest_valid_bpp(struct drm_i915_private *i915, u32 bpp, u32 p
/* From XE_LPD onwards we support bpp values from bpc up to uncompressed bpp-1 */
if (DISPLAY_VER(i915) >= 13) {
bits_per_pixel = min(bits_per_pixel, pipe_bpp - 1);
/*
* According to BSpec, 27 is the max DSC output bpp,
* 8 is the min DSC output bpp
*/
bits_per_pixel = clamp_t(u32, bits_per_pixel, 8, 27);
} else {
/* Find the nearest match in the array of known BPPs from VESA */
for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
@ -716,9 +722,19 @@ u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
* (LinkSymbolClock)* 8 * (TimeSlots / 64)
* for SST -> TimeSlots is 64(i.e all TimeSlots that are available)
* for MST -> TimeSlots has to be calculated, based on mode requirements
*
* Due to FEC overhead, the available bw is reduced to 97.2261%.
* To support the given mode:
* Bandwidth required should be <= Available link Bandwidth * FEC Overhead
* =>ModeClock * bits_per_pixel <= Available Link Bandwidth * FEC Overhead
* =>bits_per_pixel <= Available link Bandwidth * FEC Overhead / ModeClock
* =>bits_per_pixel <= (NumberOfLanes * LinkSymbolClock) * 8 * (TimeSlots / 64) /
* (ModeClock / FEC Overhead)
* =>bits_per_pixel <= (NumberOfLanes * LinkSymbolClock * TimeSlots) /
* (ModeClock / FEC Overhead * 8)
*/
bits_per_pixel = DIV_ROUND_UP((link_clock * lane_count) * timeslots,
intel_dp_mode_to_fec_clock(mode_clock) * 8);
bits_per_pixel = ((link_clock * lane_count) * timeslots) /
(intel_dp_mode_to_fec_clock(mode_clock) * 8);
drm_dbg_kms(&i915->drm, "Max link bpp is %u for %u timeslots "
"total bw %u pixel clock %u\n",
@ -771,6 +787,13 @@ u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
min_slice_count = DIV_ROUND_UP(mode_clock,
DP_DSC_MAX_ENC_THROUGHPUT_1);
/*
* Due to some DSC engine BW limitations, we need to enable a second
* slice and VDSC engine whenever we approach close enough to the max CDCLK
*/
if (mode_clock >= ((i915->display.cdclk.max_cdclk_freq * 85) / 100))
min_slice_count = max_t(u8, min_slice_count, 2);
max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
drm_dbg_kms(&i915->drm,
@ -1597,16 +1620,8 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
* is greater than the maximum Cdclock and if slice count is even
* then we need to use 2 VDSC instances.
*/
if (adjusted_mode->crtc_clock > dev_priv->display.cdclk.max_cdclk_freq ||
pipe_config->bigjoiner_pipes) {
if (pipe_config->dsc.slice_count > 1) {
pipe_config->dsc.dsc_split = true;
} else {
drm_dbg_kms(&dev_priv->drm,
"Cannot split stream to use 2 VDSC instances\n");
return -EINVAL;
}
}
if (pipe_config->bigjoiner_pipes || pipe_config->dsc.slice_count > 1)
pipe_config->dsc.dsc_split = true;
ret = intel_dp_dsc_compute_params(&dig_port->base, pipe_config);
if (ret < 0) {

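The max compressed link bpp computed in the hunk above reduces to a single expression once intel_dp_mode_to_fec_clock() has folded the FEC overhead into the mode clock. A minimal sketch of that arithmetic (the helper name below is illustrative, not part of the driver):

static u32 example_max_dsc_link_bpp(u32 link_clock, u32 lane_count,
				    u32 timeslots, u32 fec_mode_clock)
{
	/*
	 * bits_per_pixel <= (NumberOfLanes * LinkSymbolClock * TimeSlots) /
	 *                   (ModeClock / FEC Overhead * 8)
	 * where fec_mode_clock stands in for intel_dp_mode_to_fec_clock(mode_clock).
	 */
	return (link_clock * lane_count * timeslots) / (fec_mode_clock * 8);
}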
View File

@ -205,8 +205,19 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
for (i = 0; i < ARRAY_SIZE(ch_data); i++)
ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);
if (is_tc_port)
if (is_tc_port) {
intel_tc_port_lock(dig_port);
/*
* Abort transfers on a disconnected port as required by
* DP 1.4a link CTS 4.2.1.5, also avoiding the long AUX
* timeouts that would otherwise happen.
* TODO: abort the transfer on non-TC ports as well.
*/
if (!intel_tc_port_connected_locked(&dig_port->base)) {
ret = -ENXIO;
goto out_unlock;
}
}
aux_domain = intel_aux_power_domain(dig_port);
@ -367,7 +378,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
intel_pps_unlock(intel_dp, pps_wakeref);
intel_display_power_put_async(i915, aux_domain, aux_wakeref);
out_unlock:
if (is_tc_port)
intel_tc_port_unlock(dig_port);

View File

@ -1379,10 +1379,6 @@ intel_dp_128b132b_lane_cds(struct intel_dp *intel_dp,
}
}
/* FIXME: Should DP_TRAINING_PATTERN_DISABLE be written first? */
if (intel_dp->set_idle_link_train)
intel_dp->set_idle_link_train(intel_dp, crtc_state);
return true;
}
@ -1433,7 +1429,11 @@ intel_dp_128b132b_link_train(struct intel_dp *intel_dp,
void intel_dp_start_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
bool passed;
/*
* TODO: Reiniting LTTPRs here won't be needed once proper connector
* HW state readout is added.
@ -1451,6 +1451,46 @@ void intel_dp_start_link_train(struct intel_dp *intel_dp,
else
passed = intel_dp_link_train_all_phys(intel_dp, crtc_state, lttpr_count);
/*
* Ignore the link failure in CI
*
* In fixed environments like CI, unexpected long HPDs are sometimes
* generated by the displays. If the ignore_long_hpd flag is set, such long
* HPDs are ignored. Probably as a consequence of these ignored long HPDs,
* subsequent link trainings fail, resulting in CI execution failures.
*
* For test cases which rely on link training or the processing of HPDs,
* the ignore_long_hpd flag can be unset from the testcase.
*/
if (!passed && i915->display.hotplug.ignore_long_hpd) {
drm_dbg_kms(&i915->drm,
"[CONNECTOR:%d:%s][ENCODER:%d:%s] Ignore the link failure\n",
connector->base.base.id, connector->base.name,
encoder->base.base.id, encoder->base.name);
return;
}
if (!passed)
intel_dp_schedule_fallback_link_training(intel_dp, crtc_state);
}
void intel_dp_128b132b_sdp_crc16(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
/*
* VIDEO_DIP_CTL register bit 31 should be set to '0' to not
* disable SDP CRC. This is applicable for Display version 13.
* The default value of bit 31 is '0', hence the write is skipped.
* TODO: Corrective actions on SDP corruption yet to be defined
*/
if (intel_dp_is_uhbr(crtc_state))
/* DP v2.0 SCR on SDP CRC16 for 128b/132b Link Layer */
drm_dp_dpcd_writeb(&intel_dp->aux,
DP_SDP_ERROR_DETECTION_CONFIGURATION,
DP_SDP_CRC16_128B132B_EN);
drm_dbg_kms(&i915->drm, "DP2.0 SDP CRC16 for 128b/132b enabled\n");
}

View File

@ -39,4 +39,6 @@ static inline u8 intel_dp_training_pattern_symbol(u8 pattern)
return pattern & ~DP_LINK_SCRAMBLING_DISABLE;
}
void intel_dp_128b132b_sdp_crc16(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
#endif /* __INTEL_DP_LINK_TRAINING_H__ */

View File

@ -210,6 +210,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
bool prealloc = false;
void __iomem *vaddr;
struct drm_i915_gem_object *obj;
struct i915_gem_ww_ctx ww;
int ret;
mutex_lock(&ifbdev->hpd_lock);
@ -283,13 +284,24 @@ static int intelfb_create(struct drm_fb_helper *helper,
info->fix.smem_len = vma->size;
}
vaddr = i915_vma_pin_iomap(vma);
if (IS_ERR(vaddr)) {
drm_err(&dev_priv->drm,
"Failed to remap framebuffer into virtual memory (%pe)\n", vaddr);
ret = PTR_ERR(vaddr);
goto out_unpin;
for_i915_gem_ww(&ww, ret, false) {
ret = i915_gem_object_lock(vma->obj, &ww);
if (ret)
continue;
vaddr = i915_vma_pin_iomap(vma);
if (IS_ERR(vaddr)) {
drm_err(&dev_priv->drm,
"Failed to remap framebuffer into virtual memory (%pe)\n", vaddr);
ret = PTR_ERR(vaddr);
continue;
}
}
if (ret)
goto out_unpin;
info->screen_base = vaddr;
info->screen_size = vma->size;
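The 'continue' statements in the new for_i915_gem_ww() loop above are what make the lock-before-pin change work: the loop re-runs its body after a ww backoff instead of bailing out on contention. A rough sketch of that idiom, illustrative rather than the macro's literal expansion:

static int example_lock_obj_ww(struct drm_i915_gem_object *obj)
{
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, false);
retry:
	err = i915_gem_object_lock(obj, &ww);
	if (err == -EDEADLK) {
		/* a contended lock was hit: back off and run the body again */
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	return err;
}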

View File

@ -845,9 +845,7 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));
/* Disable DP_TP_CTL and FDI_RX_CTL and retry */
intel_de_rmw(dev_priv, DP_TP_CTL(PORT_E),
DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK,
DP_TP_CTL_LINK_TRAIN_PAT1);
intel_de_rmw(dev_priv, DP_TP_CTL(PORT_E), DP_TP_CTL_ENABLE, 0);
intel_de_posting_read(dev_priv, DP_TP_CTL(PORT_E));
intel_wait_ddi_buf_idle(dev_priv, PORT_E);

View File

@ -23,6 +23,7 @@
#include "intel_display_power_well.h"
#include "intel_display_types.h"
#include "intel_hdcp.h"
#include "intel_hdcp_gsc.h"
#include "intel_hdcp_regs.h"
#include "intel_pcode.h"
@ -203,13 +204,20 @@ bool intel_hdcp2_capable(struct intel_connector *connector)
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
struct intel_gt *gt = dev_priv->media_gt;
struct intel_gsc_uc *gsc = &gt->uc.gsc;
bool capable = false;
/* I915 support for HDCP2.2 */
if (!hdcp->hdcp2_supported)
return false;
/* MEI interface is solid */
/* If MTL+, make sure the GSC is loaded and the proxy is set up */
if (intel_hdcp_gsc_cs_required(dev_priv))
if (!intel_uc_fw_is_running(&gsc->fw))
return false;
/* MEI/GSC interface is solid depending on which is used */
mutex_lock(&dev_priv->display.hdcp.comp_mutex);
if (!dev_priv->display.hdcp.comp_added || !dev_priv->display.hdcp.master) {
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
@ -1142,18 +1150,18 @@ hdcp2_prepare_ake_init(struct intel_connector *connector,
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct i915_hdcp_comp_master *comp;
struct i915_hdcp_master *arbiter;
int ret;
mutex_lock(&dev_priv->display.hdcp.comp_mutex);
comp = dev_priv->display.hdcp.master;
arbiter = dev_priv->display.hdcp.master;
if (!comp || !comp->ops) {
if (!arbiter || !arbiter->ops) {
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
ret = comp->ops->initiate_hdcp2_session(comp->mei_dev, data, ake_data);
ret = arbiter->ops->initiate_hdcp2_session(arbiter->hdcp_dev, data, ake_data);
if (ret)
drm_dbg_kms(&dev_priv->drm, "Prepare_ake_init failed. %d\n",
ret);
@ -1172,18 +1180,18 @@ hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct i915_hdcp_comp_master *comp;
struct i915_hdcp_master *arbiter;
int ret;
mutex_lock(&dev_priv->display.hdcp.comp_mutex);
comp = dev_priv->display.hdcp.master;
arbiter = dev_priv->display.hdcp.master;
if (!comp || !comp->ops) {
if (!arbiter || !arbiter->ops) {
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
ret = comp->ops->verify_receiver_cert_prepare_km(comp->mei_dev, data,
ret = arbiter->ops->verify_receiver_cert_prepare_km(arbiter->hdcp_dev, data,
rx_cert, paired,
ek_pub_km, msg_sz);
if (ret < 0)
@ -1200,18 +1208,18 @@ static int hdcp2_verify_hprime(struct intel_connector *connector,
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct i915_hdcp_comp_master *comp;
struct i915_hdcp_master *arbiter;
int ret;
mutex_lock(&dev_priv->display.hdcp.comp_mutex);
comp = dev_priv->display.hdcp.master;
arbiter = dev_priv->display.hdcp.master;
if (!comp || !comp->ops) {
if (!arbiter || !arbiter->ops) {
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
ret = comp->ops->verify_hprime(comp->mei_dev, data, rx_hprime);
ret = arbiter->ops->verify_hprime(arbiter->hdcp_dev, data, rx_hprime);
if (ret < 0)
drm_dbg_kms(&dev_priv->drm, "Verify hprime failed. %d\n", ret);
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
@ -1226,18 +1234,18 @@ hdcp2_store_pairing_info(struct intel_connector *connector,
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct i915_hdcp_comp_master *comp;
struct i915_hdcp_master *arbiter;
int ret;
mutex_lock(&dev_priv->display.hdcp.comp_mutex);
comp = dev_priv->display.hdcp.master;
arbiter = dev_priv->display.hdcp.master;
if (!comp || !comp->ops) {
if (!arbiter || !arbiter->ops) {
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
ret = comp->ops->store_pairing_info(comp->mei_dev, data, pairing_info);
ret = arbiter->ops->store_pairing_info(arbiter->hdcp_dev, data, pairing_info);
if (ret < 0)
drm_dbg_kms(&dev_priv->drm, "Store pairing info failed. %d\n",
ret);
@ -1253,18 +1261,18 @@ hdcp2_prepare_lc_init(struct intel_connector *connector,
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct i915_hdcp_comp_master *comp;
struct i915_hdcp_master *arbiter;
int ret;
mutex_lock(&dev_priv->display.hdcp.comp_mutex);
comp = dev_priv->display.hdcp.master;
arbiter = dev_priv->display.hdcp.master;
if (!comp || !comp->ops) {
if (!arbiter || !arbiter->ops) {
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
ret = comp->ops->initiate_locality_check(comp->mei_dev, data, lc_init);
ret = arbiter->ops->initiate_locality_check(arbiter->hdcp_dev, data, lc_init);
if (ret < 0)
drm_dbg_kms(&dev_priv->drm, "Prepare lc_init failed. %d\n",
ret);
@ -1280,18 +1288,18 @@ hdcp2_verify_lprime(struct intel_connector *connector,
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct i915_hdcp_comp_master *comp;
struct i915_hdcp_master *arbiter;
int ret;
mutex_lock(&dev_priv->display.hdcp.comp_mutex);
comp = dev_priv->display.hdcp.master;
arbiter = dev_priv->display.hdcp.master;
if (!comp || !comp->ops) {
if (!arbiter || !arbiter->ops) {
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
ret = comp->ops->verify_lprime(comp->mei_dev, data, rx_lprime);
ret = arbiter->ops->verify_lprime(arbiter->hdcp_dev, data, rx_lprime);
if (ret < 0)
drm_dbg_kms(&dev_priv->drm, "Verify L_Prime failed. %d\n",
ret);
@ -1306,18 +1314,18 @@ static int hdcp2_prepare_skey(struct intel_connector *connector,
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct i915_hdcp_comp_master *comp;
struct i915_hdcp_master *arbiter;
int ret;
mutex_lock(&dev_priv->display.hdcp.comp_mutex);
comp = dev_priv->display.hdcp.master;
arbiter = dev_priv->display.hdcp.master;
if (!comp || !comp->ops) {
if (!arbiter || !arbiter->ops) {
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
ret = comp->ops->get_session_key(comp->mei_dev, data, ske_data);
ret = arbiter->ops->get_session_key(arbiter->hdcp_dev, data, ske_data);
if (ret < 0)
drm_dbg_kms(&dev_priv->drm, "Get session key failed. %d\n",
ret);
@ -1335,20 +1343,21 @@ hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct i915_hdcp_comp_master *comp;
struct i915_hdcp_master *arbiter;
int ret;
mutex_lock(&dev_priv->display.hdcp.comp_mutex);
comp = dev_priv->display.hdcp.master;
arbiter = dev_priv->display.hdcp.master;
if (!comp || !comp->ops) {
if (!arbiter || !arbiter->ops) {
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
ret = comp->ops->repeater_check_flow_prepare_ack(comp->mei_dev, data,
rep_topology,
rep_send_ack);
ret = arbiter->ops->repeater_check_flow_prepare_ack(arbiter->hdcp_dev,
data,
rep_topology,
rep_send_ack);
if (ret < 0)
drm_dbg_kms(&dev_priv->drm,
"Verify rep topology failed. %d\n", ret);
@ -1364,18 +1373,18 @@ hdcp2_verify_mprime(struct intel_connector *connector,
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct i915_hdcp_comp_master *comp;
struct i915_hdcp_master *arbiter;
int ret;
mutex_lock(&dev_priv->display.hdcp.comp_mutex);
comp = dev_priv->display.hdcp.master;
arbiter = dev_priv->display.hdcp.master;
if (!comp || !comp->ops) {
if (!arbiter || !arbiter->ops) {
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
ret = comp->ops->verify_mprime(comp->mei_dev, data, stream_ready);
ret = arbiter->ops->verify_mprime(arbiter->hdcp_dev, data, stream_ready);
if (ret < 0)
drm_dbg_kms(&dev_priv->drm, "Verify mprime failed. %d\n", ret);
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
@ -1388,18 +1397,18 @@ static int hdcp2_authenticate_port(struct intel_connector *connector)
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct i915_hdcp_comp_master *comp;
struct i915_hdcp_master *arbiter;
int ret;
mutex_lock(&dev_priv->display.hdcp.comp_mutex);
comp = dev_priv->display.hdcp.master;
arbiter = dev_priv->display.hdcp.master;
if (!comp || !comp->ops) {
if (!arbiter || !arbiter->ops) {
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
ret = comp->ops->enable_hdcp_authentication(comp->mei_dev, data);
ret = arbiter->ops->enable_hdcp_authentication(arbiter->hdcp_dev, data);
if (ret < 0)
drm_dbg_kms(&dev_priv->drm, "Enable hdcp auth failed. %d\n",
ret);
@ -1408,22 +1417,22 @@ static int hdcp2_authenticate_port(struct intel_connector *connector)
return ret;
}
static int hdcp2_close_mei_session(struct intel_connector *connector)
static int hdcp2_close_session(struct intel_connector *connector)
{
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct i915_hdcp_comp_master *comp;
struct i915_hdcp_master *arbiter;
int ret;
mutex_lock(&dev_priv->display.hdcp.comp_mutex);
comp = dev_priv->display.hdcp.master;
arbiter = dev_priv->display.hdcp.master;
if (!comp || !comp->ops) {
if (!arbiter || !arbiter->ops) {
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
ret = comp->ops->close_hdcp_session(comp->mei_dev,
ret = arbiter->ops->close_hdcp_session(arbiter->hdcp_dev,
&dig_port->hdcp_port_data);
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
@ -1432,7 +1441,7 @@ static int hdcp2_close_mei_session(struct intel_connector *connector)
static int hdcp2_deauthenticate_port(struct intel_connector *connector)
{
return hdcp2_close_mei_session(connector);
return hdcp2_close_session(connector);
}
/* Authentication flow starts from here */
@ -2142,8 +2151,8 @@ static int i915_hdcp_component_bind(struct device *i915_kdev,
drm_dbg(&dev_priv->drm, "I915 HDCP comp bind\n");
mutex_lock(&dev_priv->display.hdcp.comp_mutex);
dev_priv->display.hdcp.master = (struct i915_hdcp_comp_master *)data;
dev_priv->display.hdcp.master->mei_dev = mei_kdev;
dev_priv->display.hdcp.master = (struct i915_hdcp_master *)data;
dev_priv->display.hdcp.master->hdcp_dev = mei_kdev;
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return 0;
@ -2160,30 +2169,30 @@ static void i915_hdcp_component_unbind(struct device *i915_kdev,
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
}
static const struct component_ops i915_hdcp_component_ops = {
static const struct component_ops i915_hdcp_ops = {
.bind = i915_hdcp_component_bind,
.unbind = i915_hdcp_component_unbind,
};
static enum mei_fw_ddi intel_get_mei_fw_ddi_index(enum port port)
static enum hdcp_ddi intel_get_hdcp_ddi_index(enum port port)
{
switch (port) {
case PORT_A:
return MEI_DDI_A;
return HDCP_DDI_A;
case PORT_B ... PORT_F:
return (enum mei_fw_ddi)port;
return (enum hdcp_ddi)port;
default:
return MEI_DDI_INVALID_PORT;
return HDCP_DDI_INVALID_PORT;
}
}
static enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder)
static enum hdcp_transcoder intel_get_hdcp_transcoder(enum transcoder cpu_transcoder)
{
switch (cpu_transcoder) {
case TRANSCODER_A ... TRANSCODER_D:
return (enum mei_fw_tc)(cpu_transcoder | 0x10);
return (enum hdcp_transcoder)(cpu_transcoder | 0x10);
default: /* eDP, DSI TRANSCODERS are non HDCP capable */
return MEI_INVALID_TRANSCODER;
return HDCP_INVALID_TRANSCODER;
}
}
@ -2197,20 +2206,20 @@ static int initialize_hdcp_port_data(struct intel_connector *connector,
enum port port = dig_port->base.port;
if (DISPLAY_VER(dev_priv) < 12)
data->fw_ddi = intel_get_mei_fw_ddi_index(port);
data->hdcp_ddi = intel_get_hdcp_ddi_index(port);
else
/*
* As per ME FW API expectation, for GEN 12+, fw_ddi is filled
* As per ME FW API expectation, for GEN 12+, hdcp_ddi is filled
* with zero (INVALID PORT index).
*/
data->fw_ddi = MEI_DDI_INVALID_PORT;
data->hdcp_ddi = HDCP_DDI_INVALID_PORT;
/*
* As associated transcoder is set and modified at modeset, here fw_tc
* As associated transcoder is set and modified at modeset, here hdcp_transcoder
* is initialized to zero (invalid transcoder index). This will be
* retained for <Gen12 forever.
*/
data->fw_tc = MEI_INVALID_TRANSCODER;
data->hdcp_transcoder = HDCP_INVALID_TRANSCODER;
data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
data->protocol = (u8)shim->protocol;
@ -2232,6 +2241,9 @@ static int initialize_hdcp_port_data(struct intel_connector *connector,
static bool is_hdcp2_supported(struct drm_i915_private *dev_priv)
{
if (intel_hdcp_gsc_cs_required(dev_priv))
return true;
if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP))
return false;
@ -2253,10 +2265,14 @@ void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
dev_priv->display.hdcp.comp_added = true;
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_component_ops,
I915_COMPONENT_HDCP);
if (intel_hdcp_gsc_cs_required(dev_priv))
ret = intel_hdcp_gsc_init(dev_priv);
else
ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_ops,
I915_COMPONENT_HDCP);
if (ret < 0) {
drm_dbg_kms(&dev_priv->drm, "Failed at component add(%d)\n",
drm_dbg_kms(&dev_priv->drm, "Failed at fw component add(%d)\n",
ret);
mutex_lock(&dev_priv->display.hdcp.comp_mutex);
dev_priv->display.hdcp.comp_added = false;
@ -2347,7 +2363,8 @@ int intel_hdcp_enable(struct intel_connector *connector,
}
if (DISPLAY_VER(dev_priv) >= 12)
dig_port->hdcp_port_data.fw_tc = intel_get_mei_fw_tc(hdcp->cpu_transcoder);
dig_port->hdcp_port_data.hdcp_transcoder =
intel_get_hdcp_transcoder(hdcp->cpu_transcoder);
/*
* Considering that HDCP2.2 is more secure than HDCP1.4, If the setup
@ -2482,7 +2499,10 @@ void intel_hdcp_component_fini(struct drm_i915_private *dev_priv)
dev_priv->display.hdcp.comp_added = false;
mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
component_del(dev_priv->drm.dev, &i915_hdcp_component_ops);
if (intel_hdcp_gsc_cs_required(dev_priv))
intel_hdcp_gsc_fini(dev_priv);
else
component_del(dev_priv->drm.dev, &i915_hdcp_ops);
}
void intel_hdcp_cleanup(struct intel_connector *connector)

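All of the comp-to-arbiter changes above follow one pattern: each HDCP2 helper locks the comp mutex, fetches display.hdcp.master, and calls through its ops table, so the MEI component and the new GSC backend can plug in interchangeably. The shape implied by those accesses, as a sketch (the real definition lives in the drm/i915_hdcp_interface.h header this series switches to):

struct example_i915_hdcp_master {
	struct device *hdcp_dev;		/* MEI device, or the i915 device when GSC is used */
	const struct i915_hdcp_ops *ops;	/* MEI component ops, or gsc_hdcp_ops */
};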
View File

@ -0,0 +1,831 @@
// SPDX-License-Identifier: MIT
/*
* Copyright 2023, Intel Corporation.
*/
#include <drm/i915_hdcp_interface.h>
#include "display/intel_hdcp_gsc.h"
#include "gem/i915_gem_region.h"
#include "gt/uc/intel_gsc_uc_heci_cmd_submit.h"
#include "i915_drv.h"
#include "i915_utils.h"
bool intel_hdcp_gsc_cs_required(struct drm_i915_private *i915)
{
return DISPLAY_VER(i915) >= 14;
}
static int
gsc_hdcp_initiate_session(struct device *dev, struct hdcp_port_data *data,
struct hdcp2_ake_init *ake_data)
{
struct wired_cmd_initiate_hdcp2_session_in session_init_in = { { 0 } };
struct wired_cmd_initiate_hdcp2_session_out
session_init_out = { { 0 } };
struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data || !ake_data)
return -EINVAL;
i915 = kdev_to_i915(dev);
if (!i915) {
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
session_init_in.header.api_version = HDCP_API_VERSION;
session_init_in.header.command_id = WIRED_INITIATE_HDCP2_SESSION;
session_init_in.header.status = FW_HDCP_STATUS_SUCCESS;
session_init_in.header.buffer_len =
WIRED_CMD_BUF_LEN_INITIATE_HDCP2_SESSION_IN;
session_init_in.port.integrated_port_type = data->port_type;
session_init_in.port.physical_port = (u8)data->hdcp_ddi;
session_init_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
session_init_in.protocol = data->protocol;
byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&session_init_in,
sizeof(session_init_in),
(u8 *)&session_init_out,
sizeof(session_init_out));
if (byte < 0) {
drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
return byte;
}
if (session_init_out.header.status != FW_HDCP_STATUS_SUCCESS) {
drm_dbg_kms(&i915->drm, "FW cmd 0x%08X Failed. Status: 0x%X\n",
WIRED_INITIATE_HDCP2_SESSION,
session_init_out.header.status);
return -EIO;
}
ake_data->msg_id = HDCP_2_2_AKE_INIT;
ake_data->tx_caps = session_init_out.tx_caps;
memcpy(ake_data->r_tx, session_init_out.r_tx, HDCP_2_2_RTX_LEN);
return 0;
}
static int
gsc_hdcp_verify_receiver_cert_prepare_km(struct device *dev,
struct hdcp_port_data *data,
struct hdcp2_ake_send_cert *rx_cert,
bool *km_stored,
struct hdcp2_ake_no_stored_km
*ek_pub_km,
size_t *msg_sz)
{
struct wired_cmd_verify_receiver_cert_in verify_rxcert_in = { { 0 } };
struct wired_cmd_verify_receiver_cert_out verify_rxcert_out = { { 0 } };
struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data || !rx_cert || !km_stored || !ek_pub_km || !msg_sz)
return -EINVAL;
i915 = kdev_to_i915(dev);
if (!i915) {
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
verify_rxcert_in.header.api_version = HDCP_API_VERSION;
verify_rxcert_in.header.command_id = WIRED_VERIFY_RECEIVER_CERT;
verify_rxcert_in.header.status = FW_HDCP_STATUS_SUCCESS;
verify_rxcert_in.header.buffer_len =
WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_IN;
verify_rxcert_in.port.integrated_port_type = data->port_type;
verify_rxcert_in.port.physical_port = (u8)data->hdcp_ddi;
verify_rxcert_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
verify_rxcert_in.cert_rx = rx_cert->cert_rx;
memcpy(verify_rxcert_in.r_rx, &rx_cert->r_rx, HDCP_2_2_RRX_LEN);
memcpy(verify_rxcert_in.rx_caps, rx_cert->rx_caps, HDCP_2_2_RXCAPS_LEN);
byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&verify_rxcert_in,
sizeof(verify_rxcert_in),
(u8 *)&verify_rxcert_out,
sizeof(verify_rxcert_out));
if (byte < 0) {
drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed: %zd\n", byte);
return byte;
}
if (verify_rxcert_out.header.status != FW_HDCP_STATUS_SUCCESS) {
drm_dbg_kms(&i915->drm, "FW cmd 0x%08X Failed. Status: 0x%X\n",
WIRED_VERIFY_RECEIVER_CERT,
verify_rxcert_out.header.status);
return -EIO;
}
*km_stored = !!verify_rxcert_out.km_stored;
if (verify_rxcert_out.km_stored) {
ek_pub_km->msg_id = HDCP_2_2_AKE_STORED_KM;
*msg_sz = sizeof(struct hdcp2_ake_stored_km);
} else {
ek_pub_km->msg_id = HDCP_2_2_AKE_NO_STORED_KM;
*msg_sz = sizeof(struct hdcp2_ake_no_stored_km);
}
memcpy(ek_pub_km->e_kpub_km, &verify_rxcert_out.ekm_buff,
sizeof(verify_rxcert_out.ekm_buff));
return 0;
}
static int
gsc_hdcp_verify_hprime(struct device *dev, struct hdcp_port_data *data,
struct hdcp2_ake_send_hprime *rx_hprime)
{
struct wired_cmd_ake_send_hprime_in send_hprime_in = { { 0 } };
struct wired_cmd_ake_send_hprime_out send_hprime_out = { { 0 } };
struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data || !rx_hprime)
return -EINVAL;
i915 = kdev_to_i915(dev);
if (!i915) {
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
send_hprime_in.header.api_version = HDCP_API_VERSION;
send_hprime_in.header.command_id = WIRED_AKE_SEND_HPRIME;
send_hprime_in.header.status = FW_HDCP_STATUS_SUCCESS;
send_hprime_in.header.buffer_len = WIRED_CMD_BUF_LEN_AKE_SEND_HPRIME_IN;
send_hprime_in.port.integrated_port_type = data->port_type;
send_hprime_in.port.physical_port = (u8)data->hdcp_ddi;
send_hprime_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
memcpy(send_hprime_in.h_prime, rx_hprime->h_prime,
HDCP_2_2_H_PRIME_LEN);
byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&send_hprime_in,
sizeof(send_hprime_in),
(u8 *)&send_hprime_out,
sizeof(send_hprime_out));
if (byte < 0) {
drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
return byte;
}
if (send_hprime_out.header.status != FW_HDCP_STATUS_SUCCESS) {
drm_dbg_kms(&i915->drm, "FW cmd 0x%08X Failed. Status: 0x%X\n",
WIRED_AKE_SEND_HPRIME, send_hprime_out.header.status);
return -EIO;
}
return 0;
}
static int
gsc_hdcp_store_pairing_info(struct device *dev, struct hdcp_port_data *data,
struct hdcp2_ake_send_pairing_info *pairing_info)
{
struct wired_cmd_ake_send_pairing_info_in pairing_info_in = { { 0 } };
struct wired_cmd_ake_send_pairing_info_out pairing_info_out = { { 0 } };
struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data || !pairing_info)
return -EINVAL;
i915 = kdev_to_i915(dev);
if (!i915) {
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
pairing_info_in.header.api_version = HDCP_API_VERSION;
pairing_info_in.header.command_id = WIRED_AKE_SEND_PAIRING_INFO;
pairing_info_in.header.status = FW_HDCP_STATUS_SUCCESS;
pairing_info_in.header.buffer_len =
WIRED_CMD_BUF_LEN_SEND_PAIRING_INFO_IN;
pairing_info_in.port.integrated_port_type = data->port_type;
pairing_info_in.port.physical_port = (u8)data->hdcp_ddi;
pairing_info_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
memcpy(pairing_info_in.e_kh_km, pairing_info->e_kh_km,
HDCP_2_2_E_KH_KM_LEN);
byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&pairing_info_in,
sizeof(pairing_info_in),
(u8 *)&pairing_info_out,
sizeof(pairing_info_out));
if (byte < 0) {
drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
return byte;
}
if (pairing_info_out.header.status != FW_HDCP_STATUS_SUCCESS) {
drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. Status: 0x%X\n",
WIRED_AKE_SEND_PAIRING_INFO,
pairing_info_out.header.status);
return -EIO;
}
return 0;
}
static int
gsc_hdcp_initiate_locality_check(struct device *dev,
struct hdcp_port_data *data,
struct hdcp2_lc_init *lc_init_data)
{
struct wired_cmd_init_locality_check_in lc_init_in = { { 0 } };
struct wired_cmd_init_locality_check_out lc_init_out = { { 0 } };
struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data || !lc_init_data)
return -EINVAL;
i915 = kdev_to_i915(dev);
if (!i915) {
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
lc_init_in.header.api_version = HDCP_API_VERSION;
lc_init_in.header.command_id = WIRED_INIT_LOCALITY_CHECK;
lc_init_in.header.status = FW_HDCP_STATUS_SUCCESS;
lc_init_in.header.buffer_len = WIRED_CMD_BUF_LEN_INIT_LOCALITY_CHECK_IN;
lc_init_in.port.integrated_port_type = data->port_type;
lc_init_in.port.physical_port = (u8)data->hdcp_ddi;
lc_init_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&lc_init_in, sizeof(lc_init_in),
(u8 *)&lc_init_out, sizeof(lc_init_out));
if (byte < 0) {
drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
return byte;
}
if (lc_init_out.header.status != FW_HDCP_STATUS_SUCCESS) {
drm_dbg_kms(&i915->drm, "FW cmd 0x%08X Failed. status: 0x%X\n",
WIRED_INIT_LOCALITY_CHECK, lc_init_out.header.status);
return -EIO;
}
lc_init_data->msg_id = HDCP_2_2_LC_INIT;
memcpy(lc_init_data->r_n, lc_init_out.r_n, HDCP_2_2_RN_LEN);
return 0;
}
static int
gsc_hdcp_verify_lprime(struct device *dev, struct hdcp_port_data *data,
struct hdcp2_lc_send_lprime *rx_lprime)
{
struct wired_cmd_validate_locality_in verify_lprime_in = { { 0 } };
struct wired_cmd_validate_locality_out verify_lprime_out = { { 0 } };
struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data || !rx_lprime)
return -EINVAL;
i915 = kdev_to_i915(dev);
if (!i915) {
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
verify_lprime_in.header.api_version = HDCP_API_VERSION;
verify_lprime_in.header.command_id = WIRED_VALIDATE_LOCALITY;
verify_lprime_in.header.status = FW_HDCP_STATUS_SUCCESS;
verify_lprime_in.header.buffer_len =
WIRED_CMD_BUF_LEN_VALIDATE_LOCALITY_IN;
verify_lprime_in.port.integrated_port_type = data->port_type;
verify_lprime_in.port.physical_port = (u8)data->hdcp_ddi;
verify_lprime_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
memcpy(verify_lprime_in.l_prime, rx_lprime->l_prime,
HDCP_2_2_L_PRIME_LEN);
byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&verify_lprime_in,
sizeof(verify_lprime_in),
(u8 *)&verify_lprime_out,
sizeof(verify_lprime_out));
if (byte < 0) {
drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
return byte;
}
if (verify_lprime_out.header.status != FW_HDCP_STATUS_SUCCESS) {
drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. status: 0x%X\n",
WIRED_VALIDATE_LOCALITY,
verify_lprime_out.header.status);
return -EIO;
}
return 0;
}
static int gsc_hdcp_get_session_key(struct device *dev,
struct hdcp_port_data *data,
struct hdcp2_ske_send_eks *ske_data)
{
struct wired_cmd_get_session_key_in get_skey_in = { { 0 } };
struct wired_cmd_get_session_key_out get_skey_out = { { 0 } };
struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data || !ske_data)
return -EINVAL;
i915 = kdev_to_i915(dev);
if (!i915) {
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
get_skey_in.header.api_version = HDCP_API_VERSION;
get_skey_in.header.command_id = WIRED_GET_SESSION_KEY;
get_skey_in.header.status = FW_HDCP_STATUS_SUCCESS;
get_skey_in.header.buffer_len = WIRED_CMD_BUF_LEN_GET_SESSION_KEY_IN;
get_skey_in.port.integrated_port_type = data->port_type;
get_skey_in.port.physical_port = (u8)data->hdcp_ddi;
get_skey_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&get_skey_in, sizeof(get_skey_in),
(u8 *)&get_skey_out, sizeof(get_skey_out));
if (byte < 0) {
drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
return byte;
}
if (get_skey_out.header.status != FW_HDCP_STATUS_SUCCESS) {
drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. status: 0x%X\n",
WIRED_GET_SESSION_KEY, get_skey_out.header.status);
return -EIO;
}
ske_data->msg_id = HDCP_2_2_SKE_SEND_EKS;
memcpy(ske_data->e_dkey_ks, get_skey_out.e_dkey_ks,
HDCP_2_2_E_DKEY_KS_LEN);
memcpy(ske_data->riv, get_skey_out.r_iv, HDCP_2_2_RIV_LEN);
return 0;
}
static int
gsc_hdcp_repeater_check_flow_prepare_ack(struct device *dev,
struct hdcp_port_data *data,
struct hdcp2_rep_send_receiverid_list
*rep_topology,
struct hdcp2_rep_send_ack
*rep_send_ack)
{
struct wired_cmd_verify_repeater_in verify_repeater_in = { { 0 } };
struct wired_cmd_verify_repeater_out verify_repeater_out = { { 0 } };
struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !rep_topology || !rep_send_ack || !data)
return -EINVAL;
i915 = kdev_to_i915(dev);
if (!i915) {
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
verify_repeater_in.header.api_version = HDCP_API_VERSION;
verify_repeater_in.header.command_id = WIRED_VERIFY_REPEATER;
verify_repeater_in.header.status = FW_HDCP_STATUS_SUCCESS;
verify_repeater_in.header.buffer_len =
WIRED_CMD_BUF_LEN_VERIFY_REPEATER_IN;
verify_repeater_in.port.integrated_port_type = data->port_type;
verify_repeater_in.port.physical_port = (u8)data->hdcp_ddi;
verify_repeater_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
memcpy(verify_repeater_in.rx_info, rep_topology->rx_info,
HDCP_2_2_RXINFO_LEN);
memcpy(verify_repeater_in.seq_num_v, rep_topology->seq_num_v,
HDCP_2_2_SEQ_NUM_LEN);
memcpy(verify_repeater_in.v_prime, rep_topology->v_prime,
HDCP_2_2_V_PRIME_HALF_LEN);
memcpy(verify_repeater_in.receiver_ids, rep_topology->receiver_ids,
HDCP_2_2_RECEIVER_IDS_MAX_LEN);
byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&verify_repeater_in,
sizeof(verify_repeater_in),
(u8 *)&verify_repeater_out,
sizeof(verify_repeater_out));
if (byte < 0) {
drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
return byte;
}
if (verify_repeater_out.header.status != FW_HDCP_STATUS_SUCCESS) {
drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. status: 0x%X\n",
WIRED_VERIFY_REPEATER,
verify_repeater_out.header.status);
return -EIO;
}
memcpy(rep_send_ack->v, verify_repeater_out.v,
HDCP_2_2_V_PRIME_HALF_LEN);
rep_send_ack->msg_id = HDCP_2_2_REP_SEND_ACK;
return 0;
}
static int gsc_hdcp_verify_mprime(struct device *dev,
struct hdcp_port_data *data,
struct hdcp2_rep_stream_ready *stream_ready)
{
struct wired_cmd_repeater_auth_stream_req_in *verify_mprime_in;
struct wired_cmd_repeater_auth_stream_req_out
verify_mprime_out = { { 0 } };
struct drm_i915_private *i915;
ssize_t byte;
size_t cmd_size;
if (!dev || !stream_ready || !data)
return -EINVAL;
i915 = kdev_to_i915(dev);
if (!i915) {
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
cmd_size = struct_size(verify_mprime_in, streams, data->k);
if (cmd_size == SIZE_MAX)
return -EINVAL;
verify_mprime_in = kzalloc(cmd_size, GFP_KERNEL);
if (!verify_mprime_in)
return -ENOMEM;
verify_mprime_in->header.api_version = HDCP_API_VERSION;
verify_mprime_in->header.command_id = WIRED_REPEATER_AUTH_STREAM_REQ;
verify_mprime_in->header.status = FW_HDCP_STATUS_SUCCESS;
verify_mprime_in->header.buffer_len = cmd_size - sizeof(verify_mprime_in->header);
verify_mprime_in->port.integrated_port_type = data->port_type;
verify_mprime_in->port.physical_port = (u8)data->hdcp_ddi;
verify_mprime_in->port.attached_transcoder = (u8)data->hdcp_transcoder;
memcpy(verify_mprime_in->m_prime, stream_ready->m_prime, HDCP_2_2_MPRIME_LEN);
drm_hdcp_cpu_to_be24(verify_mprime_in->seq_num_m, data->seq_num_m);
memcpy(verify_mprime_in->streams, data->streams,
array_size(data->k, sizeof(*data->streams)));
verify_mprime_in->k = cpu_to_be16(data->k);
byte = intel_hdcp_gsc_msg_send(i915, (u8 *)verify_mprime_in, cmd_size,
(u8 *)&verify_mprime_out,
sizeof(verify_mprime_out));
kfree(verify_mprime_in);
if (byte < 0) {
drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
return byte;
}
if (verify_mprime_out.header.status != FW_HDCP_STATUS_SUCCESS) {
drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. status: 0x%X\n",
WIRED_REPEATER_AUTH_STREAM_REQ,
verify_mprime_out.header.status);
return -EIO;
}
return 0;
}
static int gsc_hdcp_enable_authentication(struct device *dev,
struct hdcp_port_data *data)
{
struct wired_cmd_enable_auth_in enable_auth_in = { { 0 } };
struct wired_cmd_enable_auth_out enable_auth_out = { { 0 } };
struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data)
return -EINVAL;
i915 = kdev_to_i915(dev);
if (!i915) {
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
enable_auth_in.header.api_version = HDCP_API_VERSION;
enable_auth_in.header.command_id = WIRED_ENABLE_AUTH;
enable_auth_in.header.status = FW_HDCP_STATUS_SUCCESS;
enable_auth_in.header.buffer_len = WIRED_CMD_BUF_LEN_ENABLE_AUTH_IN;
enable_auth_in.port.integrated_port_type = data->port_type;
enable_auth_in.port.physical_port = (u8)data->hdcp_ddi;
enable_auth_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
enable_auth_in.stream_type = data->streams[0].stream_type;
byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&enable_auth_in,
sizeof(enable_auth_in),
(u8 *)&enable_auth_out,
sizeof(enable_auth_out));
if (byte < 0) {
drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
return byte;
}
if (enable_auth_out.header.status != FW_HDCP_STATUS_SUCCESS) {
drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. status: 0x%X\n",
WIRED_ENABLE_AUTH, enable_auth_out.header.status);
return -EIO;
}
return 0;
}
static int
gsc_hdcp_close_session(struct device *dev, struct hdcp_port_data *data)
{
struct wired_cmd_close_session_in session_close_in = { { 0 } };
struct wired_cmd_close_session_out session_close_out = { { 0 } };
struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data)
return -EINVAL;
i915 = kdev_to_i915(dev);
if (!i915) {
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
session_close_in.header.api_version = HDCP_API_VERSION;
session_close_in.header.command_id = WIRED_CLOSE_SESSION;
session_close_in.header.status = FW_HDCP_STATUS_SUCCESS;
session_close_in.header.buffer_len =
WIRED_CMD_BUF_LEN_CLOSE_SESSION_IN;
session_close_in.port.integrated_port_type = data->port_type;
session_close_in.port.physical_port = (u8)data->hdcp_ddi;
session_close_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&session_close_in,
sizeof(session_close_in),
(u8 *)&session_close_out,
sizeof(session_close_out));
if (byte < 0) {
drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
return byte;
}
if (session_close_out.header.status != FW_HDCP_STATUS_SUCCESS) {
drm_dbg_kms(&i915->drm, "Session Close Failed. status: 0x%X\n",
session_close_out.header.status);
return -EIO;
}
return 0;
}
static const struct i915_hdcp_ops gsc_hdcp_ops = {
.initiate_hdcp2_session = gsc_hdcp_initiate_session,
.verify_receiver_cert_prepare_km =
gsc_hdcp_verify_receiver_cert_prepare_km,
.verify_hprime = gsc_hdcp_verify_hprime,
.store_pairing_info = gsc_hdcp_store_pairing_info,
.initiate_locality_check = gsc_hdcp_initiate_locality_check,
.verify_lprime = gsc_hdcp_verify_lprime,
.get_session_key = gsc_hdcp_get_session_key,
.repeater_check_flow_prepare_ack =
gsc_hdcp_repeater_check_flow_prepare_ack,
.verify_mprime = gsc_hdcp_verify_mprime,
.enable_hdcp_authentication = gsc_hdcp_enable_authentication,
.close_hdcp_session = gsc_hdcp_close_session,
};
/* This function helps allocate memory for the command that we will send to the GSC CS */
static int intel_hdcp_gsc_initialize_message(struct drm_i915_private *i915,
struct intel_hdcp_gsc_message *hdcp_message)
{
struct intel_gt *gt = i915->media_gt;
struct drm_i915_gem_object *obj = NULL;
struct i915_vma *vma = NULL;
void *cmd;
int err;
/* allocate object of one page for HDCP command memory and store it */
obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
if (IS_ERR(obj)) {
drm_err(&i915->drm, "Failed to allocate HDCP streaming command!\n");
return PTR_ERR(obj);
}
cmd = i915_gem_object_pin_map_unlocked(obj, i915_coherent_map_type(i915, obj, true));
if (IS_ERR(cmd)) {
drm_err(&i915->drm, "Failed to map gsc message page!\n");
err = PTR_ERR(cmd);
goto out_unpin;
}
vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out_unmap;
}
err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
if (err)
goto out_unmap;
memset(cmd, 0, obj->base.size);
hdcp_message->hdcp_cmd = cmd;
hdcp_message->vma = vma;
return 0;
out_unmap:
i915_gem_object_unpin_map(obj);
out_unpin:
i915_gem_object_put(obj);
return err;
}
static int intel_hdcp_gsc_hdcp2_init(struct drm_i915_private *i915)
{
struct intel_hdcp_gsc_message *hdcp_message;
int ret;
hdcp_message = kzalloc(sizeof(*hdcp_message), GFP_KERNEL);
if (!hdcp_message)
return -ENOMEM;
/*
* NOTE: No need to lock the comp mutex here as it is already
* going to be taken before this function is called
*/
i915->display.hdcp.hdcp_message = hdcp_message;
ret = intel_hdcp_gsc_initialize_message(i915, hdcp_message);
if (ret)
drm_err(&i915->drm, "Could not initialize hdcp_message\n");
return ret;
}
static void intel_hdcp_gsc_free_message(struct drm_i915_private *i915)
{
struct intel_hdcp_gsc_message *hdcp_message =
i915->display.hdcp.hdcp_message;
i915_vma_unpin_and_release(&hdcp_message->vma, I915_VMA_RELEASE_MAP);
kfree(hdcp_message);
}
int intel_hdcp_gsc_init(struct drm_i915_private *i915)
{
struct i915_hdcp_master *data;
int ret;
data = kzalloc(sizeof(struct i915_hdcp_master), GFP_KERNEL);
if (!data)
return -ENOMEM;
mutex_lock(&i915->display.hdcp.comp_mutex);
i915->display.hdcp.master = data;
i915->display.hdcp.master->hdcp_dev = i915->drm.dev;
i915->display.hdcp.master->ops = &gsc_hdcp_ops;
ret = intel_hdcp_gsc_hdcp2_init(i915);
mutex_unlock(&i915->display.hdcp.comp_mutex);
return ret;
}
void intel_hdcp_gsc_fini(struct drm_i915_private *i915)
{
intel_hdcp_gsc_free_message(i915);
kfree(i915->display.hdcp.master);
}
static int intel_gsc_send_sync(struct drm_i915_private *i915,
struct intel_gsc_mtl_header *header, u64 addr,
size_t msg_out_len)
{
struct intel_gt *gt = i915->media_gt;
int ret;
header->flags = 0;
ret = intel_gsc_uc_heci_cmd_submit_packet(&gt->uc.gsc, addr,
header->message_size,
addr,
msg_out_len + sizeof(*header));
if (ret) {
drm_err(&i915->drm, "failed to send gsc HDCP msg (%d)\n", ret);
return ret;
}
/*
* Checking validity marker for memory sanity
*/
if (header->validity_marker != GSC_HECI_VALIDITY_MARKER) {
drm_err(&i915->drm, "invalid validity marker\n");
return -EINVAL;
}
if (header->status != 0) {
drm_err(&i915->drm, "header status indicates error %d\n",
header->status);
return -EINVAL;
}
if (header->flags & GSC_OUTFLAG_MSG_PENDING)
return -EAGAIN;
return 0;
}
/*
* This function can now be used for sending requests and will also handle
* the receipt of reply messages, hence no separate message-retrieval function
* is required. We initialize the intel_hdcp_gsc_message structure, then add
* the GSC CS memory header as stated in the specs, after which the normal
* HDCP payload follows.
*/
ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in,
size_t msg_in_len, u8 *msg_out,
size_t msg_out_len)
{
struct intel_gt *gt = i915->media_gt;
struct intel_gsc_mtl_header *header;
const size_t max_msg_size = PAGE_SIZE - sizeof(*header);
struct intel_hdcp_gsc_message *hdcp_message;
u64 addr, host_session_id;
u32 reply_size, msg_size;
int ret, tries = 0;
if (!intel_uc_uses_gsc_uc(&gt->uc))
return -ENODEV;
if (msg_in_len > max_msg_size || msg_out_len > max_msg_size)
return -ENOSPC;
hdcp_message = i915->display.hdcp.hdcp_message;
header = hdcp_message->hdcp_cmd;
addr = i915_ggtt_offset(hdcp_message->vma);
msg_size = msg_in_len + sizeof(*header);
memset(header, 0, msg_size);
get_random_bytes(&host_session_id, sizeof(u64));
intel_gsc_uc_heci_cmd_emit_mtl_header(header, HECI_MEADDRESS_HDCP,
msg_size, host_session_id);
memcpy(hdcp_message->hdcp_cmd + sizeof(*header), msg_in, msg_in_len);
/*
* Keep resending the request while the pending bit is set. There is no
* need to add a message handle: we reuse the same address, so the location
* of the header is unchanged and it already contains the handle. We send
* the message up to 20 times, 50 ms apart.
*/
do {
ret = intel_gsc_send_sync(i915, header, addr, msg_out_len);
/* Only try again if gsc says so */
if (ret != -EAGAIN)
break;
msleep(50);
} while (++tries < 20);
if (ret)
goto err;
/* we use the same mem for the reply, so header is in the same loc */
reply_size = header->message_size - sizeof(*header);
if (reply_size > msg_out_len) {
drm_warn(&i915->drm, "caller with insufficient HDCP reply size %u (%d)\n",
reply_size, (u32)msg_out_len);
reply_size = msg_out_len;
} else if (reply_size != msg_out_len) {
drm_dbg_kms(&i915->drm, "caller unexpected HCDP reply size %u (%d)\n",
reply_size, (u32)msg_out_len);
}
memcpy(msg_out, hdcp_message->hdcp_cmd + sizeof(*header), msg_out_len);
err:
return ret;
}
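For reference, intel_hdcp_gsc_msg_send() reuses the single page set up by intel_hdcp_gsc_initialize_message() for both directions of the exchange. A rough picture of that buffer, derived from the code above:

/*
 *   addr = i915_ggtt_offset(hdcp_message->vma)
 *   +--------------------------------+
 *   | struct intel_gsc_mtl_header    |  filled by
 *   |                                |  intel_gsc_uc_heci_cmd_emit_mtl_header()
 *   +--------------------------------+
 *   | HDCP wired command payload     |  msg_in, at most PAGE_SIZE - sizeof(*header)
 *   +--------------------------------+
 *
 * The GSC writes its reply over the same buffer, so the returned header
 * (message_size, status, flags) and msg_out are read back from the same
 * offsets once intel_gsc_send_sync() completes.
 */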

View File

@ -0,0 +1,26 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2023 Intel Corporation
*/
#ifndef __INTEL_HDCP_GSC_H__
#define __INTEL_HDCP_GSC_H__
#include <linux/err.h>
#include <linux/types.h>
struct drm_i915_private;
struct intel_hdcp_gsc_message {
struct i915_vma *vma;
void *hdcp_cmd;
};
bool intel_hdcp_gsc_cs_required(struct drm_i915_private *i915);
ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in,
size_t msg_in_len, u8 *msg_out,
size_t msg_out_len);
int intel_hdcp_gsc_init(struct drm_i915_private *i915);
void intel_hdcp_gsc_fini(struct drm_i915_private *i915);
#endif /* __INTEL_HDCP_GSC_H__ */

View File

@ -389,6 +389,13 @@ static void i915_hotplug_work_func(struct work_struct *work)
spin_unlock_irq(&dev_priv->irq_lock);
/* Skip calling encoder hotplug handlers if the ignore long HPD flag is set */
if (dev_priv->display.hotplug.ignore_long_hpd) {
drm_dbg_kms(&dev_priv->drm, "Ignore HPD flag on - skip encoder hotplug handlers\n");
mutex_unlock(&dev_priv->drm.mode_config.mutex);
return;
}
drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
enum hpd_pin pin;
@ -940,4 +947,6 @@ void intel_hpd_debugfs_register(struct drm_i915_private *i915)
i915, &i915_hpd_storm_ctl_fops);
debugfs_create_file("i915_hpd_short_storm_ctl", 0644, minor->debugfs_root,
i915, &i915_hpd_short_storm_ctl_fops);
debugfs_create_bool("i915_ignore_long_hpd", 0644, minor->debugfs_root,
&i915->display.hotplug.ignore_long_hpd);
}
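The new entry is a plain boolean debugfs file; assuming the usual debugfs mount point, a CI test would enable it by writing 1 to /sys/kernel/debug/dri/<minor>/i915_ignore_long_hpd (the exact path depends on the DRM minor index) and clear it again for cases that do want long HPDs and link training failures to be processed.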

View File

@ -26,6 +26,7 @@
#include "intel_fifo_underrun.h"
#include "intel_modeset_setup.h"
#include "intel_pch_display.h"
#include "intel_vblank.h"
#include "intel_wm.h"
#include "skl_watermark.h"

View File

@ -1159,13 +1159,10 @@ void intel_opregion_register(struct drm_i915_private *i915)
intel_opregion_resume(i915);
}
void intel_opregion_resume(struct drm_i915_private *i915)
static void intel_opregion_resume_display(struct drm_i915_private *i915)
{
struct intel_opregion *opregion = &i915->display.opregion;
if (!opregion->header)
return;
if (opregion->acpi) {
intel_didl_outputs(i915);
intel_setup_cadls(i915);
@ -1186,10 +1183,34 @@ void intel_opregion_resume(struct drm_i915_private *i915)
/* Some platforms abuse the _DSM to enable MUX */
intel_dsm_get_bios_data_funcs_supported(i915);
}
void intel_opregion_resume(struct drm_i915_private *i915)
{
struct intel_opregion *opregion = &i915->display.opregion;
if (!opregion->header)
return;
if (HAS_DISPLAY(i915))
intel_opregion_resume_display(i915);
intel_opregion_notify_adapter(i915, PCI_D0);
}
static void intel_opregion_suspend_display(struct drm_i915_private *i915)
{
struct intel_opregion *opregion = &i915->display.opregion;
if (opregion->asle)
opregion->asle->ardy = ASLE_ARDY_NOT_READY;
cancel_work_sync(&i915->display.opregion.asle_work);
if (opregion->acpi)
opregion->acpi->drdy = 0;
}
void intel_opregion_suspend(struct drm_i915_private *i915, pci_power_t state)
{
struct intel_opregion *opregion = &i915->display.opregion;
@ -1199,13 +1220,8 @@ void intel_opregion_suspend(struct drm_i915_private *i915, pci_power_t state)
intel_opregion_notify_adapter(i915, state);
if (opregion->asle)
opregion->asle->ardy = ASLE_ARDY_NOT_READY;
cancel_work_sync(&i915->display.opregion.asle_work);
if (opregion->acpi)
opregion->acpi->drdy = 0;
if (HAS_DISPLAY(i915))
intel_opregion_suspend_display(i915);
}
void intel_opregion_unregister(struct drm_i915_private *i915)
@ -1221,6 +1237,14 @@ void intel_opregion_unregister(struct drm_i915_private *i915)
unregister_acpi_notifier(&opregion->acpi_notifier);
opregion->acpi_notifier.notifier_call = NULL;
}
}
void intel_opregion_cleanup(struct drm_i915_private *i915)
{
struct intel_opregion *opregion = &i915->display.opregion;
if (!opregion->header)
return;
/* just clear all opregion memory pointers now */
memunmap(opregion->header);

View File

@ -60,6 +60,7 @@ struct intel_opregion {
#ifdef CONFIG_ACPI
int intel_opregion_setup(struct drm_i915_private *dev_priv);
void intel_opregion_cleanup(struct drm_i915_private *i915);
void intel_opregion_register(struct drm_i915_private *dev_priv);
void intel_opregion_unregister(struct drm_i915_private *dev_priv);
@ -85,6 +86,10 @@ static inline int intel_opregion_setup(struct drm_i915_private *dev_priv)
return 0;
}
static inline void intel_opregion_cleanup(struct drm_i915_private *i915)
{
}
static inline void intel_opregion_register(struct drm_i915_private *dev_priv)
{
}

View File

@ -2644,3 +2644,302 @@ void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
break;
}
}
static void
psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
const char *status = "unknown";
u32 val, status_val;
if (intel_dp->psr.psr2_enabled) {
static const char * const live_status[] = {
"IDLE",
"CAPTURE",
"CAPTURE_FS",
"SLEEP",
"BUFON_FW",
"ML_UP",
"SU_STANDBY",
"FAST_SLEEP",
"DEEP_SLEEP",
"BUF_ON",
"TG_ON"
};
val = intel_de_read(dev_priv,
EDP_PSR2_STATUS(intel_dp->psr.transcoder));
status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
if (status_val < ARRAY_SIZE(live_status))
status = live_status[status_val];
} else {
static const char * const live_status[] = {
"IDLE",
"SRDONACK",
"SRDENT",
"BUFOFF",
"BUFON",
"AUXACK",
"SRDOFFACK",
"SRDENT_ON",
};
val = intel_de_read(dev_priv,
EDP_PSR_STATUS(intel_dp->psr.transcoder));
status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
EDP_PSR_STATUS_STATE_SHIFT;
if (status_val < ARRAY_SIZE(live_status))
status = live_status[status_val];
}
seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
}
static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_psr *psr = &intel_dp->psr;
intel_wakeref_t wakeref;
const char *status;
bool enabled;
u32 val;
seq_printf(m, "Sink support: %s", str_yes_no(psr->sink_support));
if (psr->sink_support)
seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
seq_puts(m, "\n");
if (!psr->sink_support)
return 0;
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
mutex_lock(&psr->lock);
if (psr->enabled)
status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
else
status = "disabled";
seq_printf(m, "PSR mode: %s\n", status);
if (!psr->enabled) {
seq_printf(m, "PSR sink not reliable: %s\n",
str_yes_no(psr->sink_not_reliable));
goto unlock;
}
if (psr->psr2_enabled) {
val = intel_de_read(dev_priv,
EDP_PSR2_CTL(intel_dp->psr.transcoder));
enabled = val & EDP_PSR2_ENABLE;
} else {
val = intel_de_read(dev_priv,
EDP_PSR_CTL(intel_dp->psr.transcoder));
enabled = val & EDP_PSR_ENABLE;
}
seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
str_enabled_disabled(enabled), val);
psr_source_status(intel_dp, m);
seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
psr->busy_frontbuffer_bits);
/*
* SKL+ Perf counter is reset to 0 every time DC state is entered
*/
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
val = intel_de_read(dev_priv,
EDP_PSR_PERF_CNT(intel_dp->psr.transcoder));
val &= EDP_PSR_PERF_CNT_MASK;
seq_printf(m, "Performance counter: %u\n", val);
}
if (psr->debug & I915_PSR_DEBUG_IRQ) {
seq_printf(m, "Last attempted entry at: %lld\n",
psr->last_entry_attempt);
seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
}
if (psr->psr2_enabled) {
u32 su_frames_val[3];
int frame;
/*
* Reading all 3 registers beforehand to minimize crossing a
* frame boundary between register reads
*/
for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
val = intel_de_read(dev_priv,
PSR2_SU_STATUS(intel_dp->psr.transcoder, frame));
su_frames_val[frame / 3] = val;
}
seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
u32 su_blocks;
su_blocks = su_frames_val[frame / 3] &
PSR2_SU_STATUS_MASK(frame);
su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
seq_printf(m, "%d\t%d\n", frame, su_blocks);
}
seq_printf(m, "PSR2 selective fetch: %s\n",
str_enabled_disabled(psr->psr2_sel_fetch_enabled));
}
unlock:
mutex_unlock(&psr->lock);
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return 0;
}
static int i915_edp_psr_status_show(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = m->private;
struct intel_dp *intel_dp = NULL;
struct intel_encoder *encoder;
if (!HAS_PSR(dev_priv))
return -ENODEV;
/* Find the first EDP which supports PSR */
for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
intel_dp = enc_to_intel_dp(encoder);
break;
}
if (!intel_dp)
return -ENODEV;
return intel_psr_status(m, intel_dp);
}
DEFINE_SHOW_ATTRIBUTE(i915_edp_psr_status);
static int
i915_edp_psr_debug_set(void *data, u64 val)
{
struct drm_i915_private *dev_priv = data;
struct intel_encoder *encoder;
intel_wakeref_t wakeref;
int ret = -ENODEV;
if (!HAS_PSR(dev_priv))
return ret;
for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
// TODO: split to each transcoder's PSR debug state
ret = intel_psr_debug_set(intel_dp, val);
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
}
return ret;
}
static int
i915_edp_psr_debug_get(void *data, u64 *val)
{
struct drm_i915_private *dev_priv = data;
struct intel_encoder *encoder;
if (!HAS_PSR(dev_priv))
return -ENODEV;
for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
// TODO: split to each transcoder's PSR debug state
*val = READ_ONCE(intel_dp->psr.debug);
return 0;
}
return -ENODEV;
}
DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
i915_edp_psr_debug_get, i915_edp_psr_debug_set,
"%llu\n");
void intel_psr_debugfs_register(struct drm_i915_private *i915)
{
struct drm_minor *minor = i915->drm.primary;
debugfs_create_file("i915_edp_psr_debug", 0644, minor->debugfs_root,
i915, &i915_edp_psr_debug_fops);
debugfs_create_file("i915_edp_psr_status", 0444, minor->debugfs_root,
i915, &i915_edp_psr_status_fops);
}
static int i915_psr_sink_status_show(struct seq_file *m, void *data)
{
struct intel_connector *connector = m->private;
struct intel_dp *intel_dp = intel_attached_dp(connector);
static const char * const sink_status[] = {
"inactive",
"transition to active, capture and display",
"active, display from RFB",
"active, capture and display on sink device timings",
"transition to inactive, capture and display, timing re-sync",
"reserved",
"reserved",
"sink internal error",
};
const char *str;
int ret;
u8 val;
if (!CAN_PSR(intel_dp)) {
seq_puts(m, "PSR Unsupported\n");
return -ENODEV;
}
if (connector->base.status != connector_status_connected)
return -ENODEV;
ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
if (ret != 1)
return ret < 0 ? ret : -EIO;
val &= DP_PSR_SINK_STATE_MASK;
if (val < ARRAY_SIZE(sink_status))
str = sink_status[val];
else
str = "unknown";
seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
static int i915_psr_status_show(struct seq_file *m, void *data)
{
struct intel_connector *connector = m->private;
struct intel_dp *intel_dp = intel_attached_dp(connector);
return intel_psr_status(m, intel_dp);
}
DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
void intel_psr_connector_debugfs_add(struct intel_connector *connector)
{
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct dentry *root = connector->base.debugfs_entry;
if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
return;
debugfs_create_file("i915_psr_sink_status", 0444, root,
connector, &i915_psr_sink_status_fops);
if (HAS_PSR(i915))
debugfs_create_file("i915_psr_status", 0444, root,
connector, &i915_psr_status_fops);
}
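The sink-status decode above is just a masked table lookup on the DP_PSR_STATUS DPCD byte. A minimal standalone sketch of that decode, with a hard-coded sample byte standing in for the drm_dp_dpcd_readb() result and the sink-state mask assumed to be the low three bits:

/* Standalone sketch; DP_PSR_SINK_STATE_MASK assumed to be the low 3 bits. */
#include <stdio.h>

#define DP_PSR_SINK_STATE_MASK 0x07

int main(void)
{
	static const char * const sink_status[] = {
		"inactive",
		"transition to active, capture and display",
		"active, display from RFB",
		"active, capture and display on sink device timings",
		"transition to inactive, capture and display, timing re-sync",
		"reserved",
		"reserved",
		"sink internal error",
	};
	unsigned int val = 0x02;	/* stand-in for the DPCD readback */

	val &= DP_PSR_SINK_STATE_MASK;
	printf("Sink PSR status: 0x%x [%s]\n", val,
	       val < sizeof(sink_status) / sizeof(sink_status[0]) ?
	       sink_status[val] : "unknown");
	return 0;
}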

View File

@ -13,6 +13,7 @@ struct drm_connector;
struct drm_connector_state;
struct drm_i915_private;
struct intel_atomic_state;
struct intel_connector;
struct intel_crtc;
struct intel_crtc_state;
struct intel_dp;
@ -61,5 +62,7 @@ void intel_psr_resume(struct intel_dp *intel_dp);
void intel_psr_lock(const struct intel_crtc_state *crtc_state);
void intel_psr_unlock(const struct intel_crtc_state *crtc_state);
void intel_psr_connector_debugfs_add(struct intel_connector *connector);
void intel_psr_debugfs_register(struct drm_i915_private *i915);
#endif /* __INTEL_PSR_H__ */

View File

@ -32,85 +32,20 @@
#include <linux/string_helpers.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_color_mgmt.h>
#include <drm/drm_crtc.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_rect.h>
#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_vgpu.h"
#include "i9xx_plane.h"
#include "intel_atomic_plane.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_frontbuffer.h"
#include "intel_sprite.h"
#include "intel_vrr.h"
int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
{
struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
struct drm_rect *src = &plane_state->uapi.src;
u32 src_x, src_y, src_w, src_h, hsub, vsub;
bool rotated = drm_rotation_90_or_270(plane_state->hw.rotation);
/*
* FIXME hsub/vsub vs. block size is a mess. Pre-tgl CCS
* abuses hsub/vsub so we can't use them here. But as they
* are limited to 32bpp RGB formats we don't actually need
* to check anything.
*/
if (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS)
return 0;
/*
* Hardware doesn't handle subpixel coordinates.
* Adjust to (macro)pixel boundary, but be careful not to
* increase the source viewport size, because that could
* push the downscaling factor out of bounds.
*/
src_x = src->x1 >> 16;
src_w = drm_rect_width(src) >> 16;
src_y = src->y1 >> 16;
src_h = drm_rect_height(src) >> 16;
drm_rect_init(src, src_x << 16, src_y << 16,
src_w << 16, src_h << 16);
if (fb->format->format == DRM_FORMAT_RGB565 && rotated) {
hsub = 2;
vsub = 2;
} else {
hsub = fb->format->hsub;
vsub = fb->format->vsub;
}
if (rotated)
hsub = vsub = max(hsub, vsub);
if (src_x % hsub || src_w % hsub) {
drm_dbg_kms(&i915->drm, "src x/w (%u, %u) must be a multiple of %u (rotated: %s)\n",
src_x, src_w, hsub, str_yes_no(rotated));
return -EINVAL;
}
if (src_y % vsub || src_h % vsub) {
drm_dbg_kms(&i915->drm, "src y/h (%u, %u) must be a multiple of %u (rotated: %s)\n",
src_y, src_h, vsub, str_yes_no(rotated));
return -EINVAL;
}
return 0;
}
static void i9xx_plane_linear_gamma(u16 gamma[8])
{
@ -1449,124 +1384,6 @@ vlv_sprite_check(struct intel_crtc_state *crtc_state,
return 0;
}
static bool has_dst_key_in_primary_plane(struct drm_i915_private *dev_priv)
{
return DISPLAY_VER(dev_priv) >= 9;
}
static void intel_plane_set_ckey(struct intel_plane_state *plane_state,
const struct drm_intel_sprite_colorkey *set)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
*key = *set;
/*
* We want src key enabled on the
* sprite and not on the primary.
*/
if (plane->id == PLANE_PRIMARY &&
set->flags & I915_SET_COLORKEY_SOURCE)
key->flags = 0;
/*
* On SKL+ we want dst key enabled on
* the primary and not on the sprite.
*/
if (DISPLAY_VER(dev_priv) >= 9 && plane->id != PLANE_PRIMARY &&
set->flags & I915_SET_COLORKEY_DESTINATION)
key->flags = 0;
}
int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_intel_sprite_colorkey *set = data;
struct drm_plane *plane;
struct drm_plane_state *plane_state;
struct drm_atomic_state *state;
struct drm_modeset_acquire_ctx ctx;
int ret = 0;
/* ignore the pointless "none" flag */
set->flags &= ~I915_SET_COLORKEY_NONE;
if (set->flags & ~(I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
return -EINVAL;
/* Make sure we don't try to enable both src & dest simultaneously */
if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
return -EINVAL;
if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
set->flags & I915_SET_COLORKEY_DESTINATION)
return -EINVAL;
plane = drm_plane_find(dev, file_priv, set->plane_id);
if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY)
return -ENOENT;
/*
* On SKL+ only plane 2 can do destination keying against plane 1.
* Also multiple planes can't do destination keying on the same
* pipe simultaneously.
*/
if (DISPLAY_VER(dev_priv) >= 9 &&
to_intel_plane(plane)->id >= PLANE_SPRITE1 &&
set->flags & I915_SET_COLORKEY_DESTINATION)
return -EINVAL;
drm_modeset_acquire_init(&ctx, 0);
state = drm_atomic_state_alloc(plane->dev);
if (!state) {
ret = -ENOMEM;
goto out;
}
state->acquire_ctx = &ctx;
while (1) {
plane_state = drm_atomic_get_plane_state(state, plane);
ret = PTR_ERR_OR_ZERO(plane_state);
if (!ret)
intel_plane_set_ckey(to_intel_plane_state(plane_state), set);
/*
* On some platforms we have to configure
* the dst colorkey on the primary plane.
*/
if (!ret && has_dst_key_in_primary_plane(dev_priv)) {
struct intel_crtc *crtc =
intel_crtc_for_pipe(dev_priv,
to_intel_plane(plane)->pipe);
plane_state = drm_atomic_get_plane_state(state,
crtc->base.primary);
ret = PTR_ERR_OR_ZERO(plane_state);
if (!ret)
intel_plane_set_ckey(to_intel_plane_state(plane_state), set);
}
if (!ret)
ret = drm_atomic_commit(state);
if (ret != -EDEADLK)
break;
drm_atomic_state_clear(state);
drm_modeset_backoff(&ctx);
}
drm_atomic_state_put(state);
out:
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
return ret;
}
static const u32 g4x_sprite_formats[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_YUYV,

View File

@ -0,0 +1,127 @@
// SPDX-License-Identifier: MIT
/*
* Copyright © 2023 Intel Corporation
*/
#include "i915_drv.h"
#include "intel_crtc.h"
#include "intel_display_types.h"
#include "intel_sprite_uapi.h"
static bool has_dst_key_in_primary_plane(struct drm_i915_private *dev_priv)
{
return DISPLAY_VER(dev_priv) >= 9;
}
static void intel_plane_set_ckey(struct intel_plane_state *plane_state,
const struct drm_intel_sprite_colorkey *set)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
*key = *set;
/*
* We want src key enabled on the
* sprite and not on the primary.
*/
if (plane->id == PLANE_PRIMARY &&
set->flags & I915_SET_COLORKEY_SOURCE)
key->flags = 0;
/*
* On SKL+ we want dst key enabled on
* the primary and not on the sprite.
*/
if (DISPLAY_VER(dev_priv) >= 9 && plane->id != PLANE_PRIMARY &&
set->flags & I915_SET_COLORKEY_DESTINATION)
key->flags = 0;
}
int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_intel_sprite_colorkey *set = data;
struct drm_plane *plane;
struct drm_plane_state *plane_state;
struct drm_atomic_state *state;
struct drm_modeset_acquire_ctx ctx;
int ret = 0;
/* ignore the pointless "none" flag */
set->flags &= ~I915_SET_COLORKEY_NONE;
if (set->flags & ~(I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
return -EINVAL;
/* Make sure we don't try to enable both src & dest simultaneously */
if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
return -EINVAL;
if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
set->flags & I915_SET_COLORKEY_DESTINATION)
return -EINVAL;
plane = drm_plane_find(dev, file_priv, set->plane_id);
if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY)
return -ENOENT;
/*
* On SKL+ only plane 2 can do destination keying against plane 1.
* Also multiple planes can't do destination keying on the same
* pipe simultaneously.
*/
if (DISPLAY_VER(dev_priv) >= 9 &&
to_intel_plane(plane)->id >= PLANE_SPRITE1 &&
set->flags & I915_SET_COLORKEY_DESTINATION)
return -EINVAL;
drm_modeset_acquire_init(&ctx, 0);
state = drm_atomic_state_alloc(plane->dev);
if (!state) {
ret = -ENOMEM;
goto out;
}
state->acquire_ctx = &ctx;
while (1) {
plane_state = drm_atomic_get_plane_state(state, plane);
ret = PTR_ERR_OR_ZERO(plane_state);
if (!ret)
intel_plane_set_ckey(to_intel_plane_state(plane_state), set);
/*
* On some platforms we have to configure
* the dst colorkey on the primary plane.
*/
if (!ret && has_dst_key_in_primary_plane(dev_priv)) {
struct intel_crtc *crtc =
intel_crtc_for_pipe(dev_priv,
to_intel_plane(plane)->pipe);
plane_state = drm_atomic_get_plane_state(state,
crtc->base.primary);
ret = PTR_ERR_OR_ZERO(plane_state);
if (!ret)
intel_plane_set_ckey(to_intel_plane_state(plane_state), set);
}
if (!ret)
ret = drm_atomic_commit(state);
if (ret != -EDEADLK)
break;
drm_atomic_state_clear(state);
drm_modeset_backoff(&ctx);
}
drm_atomic_state_put(state);
out:
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
return ret;
}

View File

@ -0,0 +1,15 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2023 Intel Corporation
*/
#ifndef __INTEL_SPRITE_UAPI_H__
#define __INTEL_SPRITE_UAPI_H__
struct drm_device;
struct drm_file;
int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
#endif /* __INTEL_SPRITE_UAPI_H__ */

View File

@ -5,6 +5,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display.h"
#include "intel_display_power_map.h"
@ -118,6 +119,24 @@ assert_tc_cold_blocked(struct intel_digital_port *dig_port)
drm_WARN_ON(&i915->drm, !enabled);
}
static enum intel_display_power_domain
tc_port_power_domain(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
return POWER_DOMAIN_PORT_DDI_LANES_TC1 + tc_port - TC_PORT_1;
}
static void
assert_tc_port_power_enabled(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
drm_WARN_ON(&i915->drm,
!intel_display_power_is_enabled(i915, tc_port_power_domain(dig_port)));
}
u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
@ -418,9 +437,9 @@ static bool icl_tc_phy_is_owned(struct intel_digital_port *dig_port)
val = intel_de_read(i915, PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
if (val == 0xffffffff) {
drm_dbg_kms(&i915->drm,
"Port %s: PHY in TCCOLD, assume safe mode\n",
"Port %s: PHY in TCCOLD, assume not owned\n",
dig_port->tc_port_name);
return true;
return false;
}
return val & DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);
@ -464,7 +483,8 @@ static void icl_tc_phy_connect(struct intel_digital_port *dig_port,
u32 live_status_mask;
int max_lanes;
if (!tc_phy_status_complete(dig_port)) {
if (!tc_phy_status_complete(dig_port) &&
!drm_WARN_ON(&i915->drm, dig_port->tc_legacy_port)) {
drm_dbg_kms(&i915->drm, "Port %s: PHY not ready\n",
dig_port->tc_port_name);
goto out_set_tbt_alt_mode;
@ -539,62 +559,171 @@ static void icl_tc_phy_disconnect(struct intel_digital_port *dig_port)
}
}
static bool icl_tc_phy_is_connected(struct intel_digital_port *dig_port)
static bool tc_phy_is_ready_and_owned(struct intel_digital_port *dig_port,
bool phy_is_ready, bool phy_is_owned)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
if (!tc_phy_status_complete(dig_port)) {
drm_dbg_kms(&i915->drm, "Port %s: PHY status not complete\n",
dig_port->tc_port_name);
return dig_port->tc_mode == TC_PORT_TBT_ALT;
drm_WARN_ON(&i915->drm, phy_is_owned && !phy_is_ready);
return phy_is_ready && phy_is_owned;
}
static bool tc_phy_is_connected(struct intel_digital_port *dig_port,
enum icl_port_dpll_id port_pll_type)
{
struct intel_encoder *encoder = &dig_port->base;
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
bool phy_is_ready = tc_phy_status_complete(dig_port);
bool phy_is_owned = tc_phy_is_owned(dig_port);
bool is_connected;
if (tc_phy_is_ready_and_owned(dig_port, phy_is_ready, phy_is_owned))
is_connected = port_pll_type == ICL_PORT_DPLL_MG_PHY;
else
is_connected = port_pll_type == ICL_PORT_DPLL_DEFAULT;
drm_dbg_kms(&i915->drm,
"Port %s: PHY connected: %s (ready: %s, owned: %s, pll_type: %s)\n",
dig_port->tc_port_name,
str_yes_no(is_connected),
str_yes_no(phy_is_ready),
str_yes_no(phy_is_owned),
port_pll_type == ICL_PORT_DPLL_DEFAULT ? "tbt" : "non-tbt");
return is_connected;
}
static void tc_phy_wait_for_ready(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
if (wait_for(tc_phy_status_complete(dig_port), 100))
drm_err(&i915->drm, "Port %s: timeout waiting for PHY ready\n",
dig_port->tc_port_name);
}
static enum tc_port_mode
hpd_mask_to_tc_mode(u32 live_status_mask)
{
if (live_status_mask)
return fls(live_status_mask) - 1;
return TC_PORT_DISCONNECTED;
}
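The HPD-mask-to-mode mapping relies on the tc_port_mode values doubling as bit positions in the live-status mask (the same relationship used by BIT(dig_port->tc_mode) elsewhere in this file), so fls() picks the highest-priority mode present. A standalone sketch of that mapping; the enum values below are illustrative, the real ordering lives in the driver headers:

/* Enum values are illustrative; the real definitions live in the driver. */
#include <stdio.h>

enum tc_port_mode {
	TC_PORT_DISCONNECTED,
	TC_PORT_TBT_ALT,
	TC_PORT_DP_ALT,
	TC_PORT_LEGACY,
};

static int fls32(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;	/* fls(): 1-based index of highest set bit */
}

static enum tc_port_mode hpd_mask_to_tc_mode(unsigned int live_status_mask)
{
	if (live_status_mask)
		return fls32(live_status_mask) - 1;
	return TC_PORT_DISCONNECTED;
}

int main(void)
{
	/* DP-alt and TBT-alt both live: the higher-valued mode (DP-alt) wins. */
	unsigned int mask = (1u << TC_PORT_DP_ALT) | (1u << TC_PORT_TBT_ALT);

	printf("mode = %d\n", hpd_mask_to_tc_mode(mask));
	return 0;
}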
static enum tc_port_mode
tc_phy_hpd_live_mode(struct intel_digital_port *dig_port)
{
u32 live_status_mask = tc_port_live_status_mask(dig_port);
return hpd_mask_to_tc_mode(live_status_mask);
}
static enum tc_port_mode
get_tc_mode_in_phy_owned_state(struct intel_digital_port *dig_port,
enum tc_port_mode live_mode)
{
switch (live_mode) {
case TC_PORT_LEGACY:
case TC_PORT_DP_ALT:
return live_mode;
default:
MISSING_CASE(live_mode);
fallthrough;
case TC_PORT_TBT_ALT:
case TC_PORT_DISCONNECTED:
if (dig_port->tc_legacy_port)
return TC_PORT_LEGACY;
else
return TC_PORT_DP_ALT;
}
}
/* On ADL-P the PHY complete flag is set in TBT mode as well. */
if (IS_ALDERLAKE_P(i915) && dig_port->tc_mode == TC_PORT_TBT_ALT)
return true;
if (!tc_phy_is_owned(dig_port)) {
drm_dbg_kms(&i915->drm, "Port %s: PHY not owned\n",
dig_port->tc_port_name);
return false;
static enum tc_port_mode
get_tc_mode_in_phy_not_owned_state(struct intel_digital_port *dig_port,
enum tc_port_mode live_mode)
{
switch (live_mode) {
case TC_PORT_LEGACY:
return TC_PORT_DISCONNECTED;
case TC_PORT_DP_ALT:
case TC_PORT_TBT_ALT:
return TC_PORT_TBT_ALT;
default:
MISSING_CASE(live_mode);
fallthrough;
case TC_PORT_DISCONNECTED:
if (dig_port->tc_legacy_port)
return TC_PORT_DISCONNECTED;
else
return TC_PORT_TBT_ALT;
}
return dig_port->tc_mode == TC_PORT_DP_ALT ||
dig_port->tc_mode == TC_PORT_LEGACY;
}
static enum tc_port_mode
intel_tc_port_get_current_mode(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
u32 live_status_mask = tc_port_live_status_mask(dig_port);
enum tc_port_mode live_mode = tc_phy_hpd_live_mode(dig_port);
bool phy_is_ready;
bool phy_is_owned;
enum tc_port_mode mode;
if (!tc_phy_is_owned(dig_port) ||
drm_WARN_ON(&i915->drm, !tc_phy_status_complete(dig_port)))
return TC_PORT_TBT_ALT;
/*
* For legacy ports the IOM firmware initializes the PHY during boot-up
* and system resume whether or not a sink is connected. Wait here for
* that initialization to complete.
*/
if (dig_port->tc_legacy_port)
tc_phy_wait_for_ready(dig_port);
mode = dig_port->tc_legacy_port ? TC_PORT_LEGACY : TC_PORT_DP_ALT;
if (live_status_mask) {
enum tc_port_mode live_mode = fls(live_status_mask) - 1;
phy_is_ready = tc_phy_status_complete(dig_port);
phy_is_owned = tc_phy_is_owned(dig_port);
if (!drm_WARN_ON(&i915->drm, live_mode == TC_PORT_TBT_ALT))
mode = live_mode;
if (!tc_phy_is_ready_and_owned(dig_port, phy_is_ready, phy_is_owned)) {
mode = get_tc_mode_in_phy_not_owned_state(dig_port, live_mode);
} else {
drm_WARN_ON(&i915->drm, live_mode == TC_PORT_TBT_ALT);
mode = get_tc_mode_in_phy_owned_state(dig_port, live_mode);
}
drm_dbg_kms(&i915->drm,
"Port %s: PHY mode: %s (ready: %s, owned: %s, HPD: %s)\n",
dig_port->tc_port_name,
tc_port_mode_name(mode),
str_yes_no(phy_is_ready),
str_yes_no(phy_is_owned),
tc_port_mode_name(live_mode));
return mode;
}
static enum tc_port_mode default_tc_mode(struct intel_digital_port *dig_port)
{
if (dig_port->tc_legacy_port)
return TC_PORT_LEGACY;
return TC_PORT_TBT_ALT;
}
static enum tc_port_mode
hpd_mask_to_target_mode(struct intel_digital_port *dig_port, u32 live_status_mask)
{
enum tc_port_mode mode = hpd_mask_to_tc_mode(live_status_mask);
if (mode != TC_PORT_DISCONNECTED)
return mode;
return default_tc_mode(dig_port);
}
static enum tc_port_mode
intel_tc_port_get_target_mode(struct intel_digital_port *dig_port)
{
u32 live_status_mask = tc_port_live_status_mask(dig_port);
if (live_status_mask)
return fls(live_status_mask) - 1;
return TC_PORT_TBT_ALT;
return hpd_mask_to_target_mode(dig_port, live_status_mask);
}
static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port,
@ -660,11 +789,24 @@ static void intel_tc_port_update_mode(struct intel_digital_port *dig_port,
tc_cold_unblock(dig_port, domain, wref);
}
static void
intel_tc_port_link_init_refcount(struct intel_digital_port *dig_port,
int refcount)
static void __intel_tc_port_get_link(struct intel_digital_port *dig_port)
{
dig_port->tc_link_refcount = refcount;
dig_port->tc_link_refcount++;
}
static void __intel_tc_port_put_link(struct intel_digital_port *dig_port)
{
dig_port->tc_link_refcount--;
}
static bool tc_port_is_enabled(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
assert_tc_port_power_enabled(dig_port);
return intel_de_read(i915, DDI_BUF_CTL(dig_port->base.port)) &
DDI_BUF_CTL_ENABLE;
}
/**
@ -679,6 +821,7 @@ void intel_tc_port_init_mode(struct intel_digital_port *dig_port)
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
intel_wakeref_t tc_cold_wref;
enum intel_display_power_domain domain;
bool update_mode = false;
mutex_lock(&dig_port->tc_lock);
@ -689,63 +832,105 @@ void intel_tc_port_init_mode(struct intel_digital_port *dig_port)
tc_cold_wref = tc_cold_block(dig_port, &domain);
dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port);
/*
* Save the initial mode for the state check in
* intel_tc_port_sanitize_mode().
*/
dig_port->tc_init_mode = dig_port->tc_mode;
if (dig_port->tc_mode != TC_PORT_DISCONNECTED)
dig_port->tc_lock_wakeref =
tc_cold_block(dig_port, &dig_port->tc_lock_power_domain);
/*
* The PHY needs to be connected for AUX to work during HW readout and
* MST topology resume, but the PHY mode can only be changed if the
* port is disabled.
*
* An exception is the case where BIOS leaves the PHY incorrectly
* disconnected on an enabled legacy port. Work around that by
* connecting the PHY even though the port is enabled. This doesn't
* cause a problem as the PHY ownership state is ignored by the
* IOM/TCSS firmware (only display can own the PHY in that case).
*/
if (!tc_port_is_enabled(dig_port)) {
update_mode = true;
} else if (dig_port->tc_mode == TC_PORT_DISCONNECTED) {
drm_WARN_ON(&i915->drm, !dig_port->tc_legacy_port);
drm_err(&i915->drm,
"Port %s: PHY disconnected on enabled port, connecting it\n",
dig_port->tc_port_name);
update_mode = true;
}
if (update_mode)
intel_tc_port_update_mode(dig_port, 1, false);
/* Prevent changing dig_port->tc_mode until intel_tc_port_sanitize_mode() is called. */
intel_tc_port_link_init_refcount(dig_port, 1);
dig_port->tc_lock_wakeref = tc_cold_block(dig_port, &dig_port->tc_lock_power_domain);
__intel_tc_port_get_link(dig_port);
tc_cold_unblock(dig_port, domain, tc_cold_wref);
drm_dbg_kms(&i915->drm, "Port %s: init mode (%s)\n",
dig_port->tc_port_name,
tc_port_mode_name(dig_port->tc_mode));
mutex_unlock(&dig_port->tc_lock);
}
static bool tc_port_has_active_links(struct intel_digital_port *dig_port,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
enum icl_port_dpll_id pll_type = ICL_PORT_DPLL_DEFAULT;
int active_links = 0;
if (dig_port->dp.is_mst) {
/* TODO: get the PLL type for MST, once HW readout is done for it. */
active_links = intel_dp_mst_encoder_active_links(dig_port);
} else if (crtc_state && crtc_state->hw.active) {
pll_type = intel_ddi_port_pll_type(&dig_port->base, crtc_state);
active_links = 1;
}
if (active_links && !tc_phy_is_connected(dig_port, pll_type))
drm_err(&i915->drm,
"Port %s: PHY disconnected with %d active link(s)\n",
dig_port->tc_port_name, active_links);
return active_links;
}
/**
* intel_tc_port_sanitize_mode: Sanitize the given port's TypeC mode
* @dig_port: digital port
* @crtc_state: atomic state of CRTC connected to @dig_port
*
* Sanitize @dig_port's TypeC mode wrt. the encoder's state right after driver
* loading and system resume:
* If the encoder is enabled, keep the TypeC mode/PHY connected state locked
* until the encoder is disabled.
* If the encoder is disabled, make sure the PHY is disconnected.
* @crtc_state is valid if @dig_port is enabled, NULL otherwise.
*/
void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port)
void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct intel_encoder *encoder = &dig_port->base;
int active_links = 0;
mutex_lock(&dig_port->tc_lock);
if (dig_port->dp.is_mst)
active_links = intel_dp_mst_encoder_active_links(dig_port);
else if (encoder->base.crtc)
active_links = to_intel_crtc(encoder->base.crtc)->active;
drm_WARN_ON(&i915->drm, dig_port->tc_link_refcount != 1);
intel_tc_port_link_init_refcount(dig_port, active_links);
if (active_links) {
if (!icl_tc_phy_is_connected(dig_port))
drm_dbg_kms(&i915->drm,
"Port %s: PHY disconnected with %d active link(s)\n",
dig_port->tc_port_name, active_links);
} else {
if (!tc_port_has_active_links(dig_port, crtc_state)) {
/*
* TBT-alt is the default mode in any case where PHY ownership is not
* held (regardless of the sink's connected live state), so we'll
* just switch to disconnected mode from it here without a note.
*/
if (dig_port->tc_mode != TC_PORT_TBT_ALT)
if (dig_port->tc_init_mode != TC_PORT_TBT_ALT &&
dig_port->tc_init_mode != TC_PORT_DISCONNECTED)
drm_dbg_kms(&i915->drm,
"Port %s: PHY left in %s mode on disabled port, disconnecting it\n",
dig_port->tc_port_name,
tc_port_mode_name(dig_port->tc_mode));
tc_port_mode_name(dig_port->tc_init_mode));
icl_tc_phy_disconnect(dig_port);
__intel_tc_port_put_link(dig_port);
tc_cold_unblock(dig_port, dig_port->tc_lock_power_domain,
fetch_and_zero(&dig_port->tc_lock_wakeref));
@ -768,16 +953,23 @@ void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port)
* connected ports are usable, and avoids exposing to the users objects they
* can't really use.
*/
bool intel_tc_port_connected_locked(struct intel_encoder *encoder)
{
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
drm_WARN_ON(&i915->drm, !intel_tc_port_ref_held(dig_port));
return tc_port_live_status_mask(dig_port) & BIT(dig_port->tc_mode);
}
bool intel_tc_port_connected(struct intel_encoder *encoder)
{
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
bool is_connected;
intel_tc_port_lock(dig_port);
is_connected = tc_port_live_status_mask(dig_port) &
BIT(dig_port->tc_mode);
is_connected = intel_tc_port_connected_locked(encoder);
intel_tc_port_unlock(dig_port);
return is_connected;
@ -857,14 +1049,14 @@ void intel_tc_port_get_link(struct intel_digital_port *dig_port,
int required_lanes)
{
__intel_tc_port_lock(dig_port, required_lanes);
dig_port->tc_link_refcount++;
__intel_tc_port_get_link(dig_port);
intel_tc_port_unlock(dig_port);
}
void intel_tc_port_put_link(struct intel_digital_port *dig_port)
{
intel_tc_port_lock(dig_port);
--dig_port->tc_link_refcount;
__intel_tc_port_put_link(dig_port);
intel_tc_port_unlock(dig_port);
/*

View File

@ -9,6 +9,7 @@
#include <linux/mutex.h>
#include <linux/types.h>
struct intel_crtc_state;
struct intel_digital_port;
struct intel_encoder;
@ -17,6 +18,7 @@ bool intel_tc_port_in_dp_alt_mode(struct intel_digital_port *dig_port);
bool intel_tc_port_in_legacy_mode(struct intel_digital_port *dig_port);
bool intel_tc_port_connected(struct intel_encoder *encoder);
bool intel_tc_port_connected_locked(struct intel_encoder *encoder);
u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port);
u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port);
@ -25,7 +27,8 @@ void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
int required_lanes);
void intel_tc_port_init_mode(struct intel_digital_port *dig_port);
void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port);
void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port,
const struct intel_crtc_state *crtc_state);
void intel_tc_port_lock(struct intel_digital_port *dig_port);
void intel_tc_port_unlock(struct intel_digital_port *dig_port);
void intel_tc_port_flush_work(struct intel_digital_port *dig_port);

View File

@ -8,6 +8,7 @@
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_vblank.h"
#include "intel_vrr.h"
/*
* This timing diagram depicts the video signal in and
@ -439,3 +440,94 @@ void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
wait_for_pipe_scanline_moving(crtc, true);
}
static int intel_crtc_scanline_offset(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
/*
* The scanline counter increments at the leading edge of hsync.
*
* On most platforms it starts counting from vtotal-1 on the
* first active line. That means the scanline counter value is
* always one less than what we would expect. Ie. just after
* start of vblank, which also occurs at start of hsync (on the
* last active line), the scanline counter will read vblank_start-1.
*
* On gen2 the scanline counter starts counting from 1 instead
* of vtotal-1, so we have to subtract one (or rather add vtotal-1
* to keep the value positive), instead of adding one.
*
* On HSW+ the behaviour of the scanline counter depends on the output
* type. For DP ports it behaves like most other platforms, but on HDMI
* there's an extra 1 line difference. So we need to add two instead of
* one to the value.
*
* On VLV/CHV DSI the scanline counter would appear to increment
* approx. 1/3 of a scanline before start of vblank. Unfortunately
* that means we can't tell whether we're in vblank or not while
* we're on that particular line. We must still set scanline_offset
* to 1 so that the vblank timestamps come out correct when we query
* the scanline counter from within the vblank interrupt handler.
* However if queried just before the start of vblank we'll get an
* answer that's slightly in the future.
*/
if (DISPLAY_VER(i915) == 2) {
int vtotal;
vtotal = adjusted_mode->crtc_vtotal;
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
vtotal /= 2;
return vtotal - 1;
} else if (HAS_DDI(i915) && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
return 2;
} else {
return 1;
}
}
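For reference, the per-platform offset computed above is later applied when converting the raw hardware counter into an actual scanline, roughly as (hw counter + scanline_offset) % vtotal. A small worked example with made-up 1080p-like timings, showing how the usual offset of 1 corrects the vblank_start-1 reading described in the comment:

/* Numbers are made up; offset as computed above (1 on most platforms). */
#include <stdio.h>

int main(void)
{
	int vtotal = 1125, vblank_start = 1084;
	int scanline_offset = 1;		/* most platforms */
	int hw_counter = vblank_start - 1;	/* raw value just after vblank start */

	int scanline = (hw_counter + scanline_offset) % vtotal;

	printf("hw=%d -> scanline=%d (vblank_start=%d)\n",
	       hw_counter, scanline, vblank_start);
	return 0;
}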
void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct drm_display_mode adjusted_mode;
int vmax_vblank_start = 0;
unsigned long irqflags;
drm_mode_init(&adjusted_mode, &crtc_state->hw.adjusted_mode);
if (crtc_state->vrr.enable) {
adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax;
adjusted_mode.crtc_vblank_end = crtc_state->vrr.vmax;
adjusted_mode.crtc_vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
vmax_vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
}
/*
* Belts and suspenders locking to guarantee everyone sees 100%
* consistent state during fastset seamless refresh rate changes.
*
* vblank_time_lock takes care of all drm_vblank.c stuff, and
* uncore.lock takes care of __intel_get_crtc_scanline() which
* may get called elsewhere as well.
*
* TODO maybe just protect everything (including
* __intel_get_crtc_scanline()) with vblank_time_lock?
* Need to audit everything to make sure it's safe.
*/
spin_lock_irqsave(&i915->drm.vblank_time_lock, irqflags);
spin_lock(&i915->uncore.lock);
drm_calc_timestamping_constants(&crtc->base, &adjusted_mode);
crtc->vmax_vblank_start = vmax_vblank_start;
crtc->mode_flags = crtc_state->mode_flags;
crtc->scanline_offset = intel_crtc_scanline_offset(crtc_state);
spin_unlock(&i915->uncore.lock);
spin_unlock_irqrestore(&i915->drm.vblank_time_lock, irqflags);
}

View File

@ -11,6 +11,7 @@
struct drm_crtc;
struct intel_crtc;
struct intel_crtc_state;
u32 i915_get_vblank_counter(struct drm_crtc *crtc);
u32 g4x_get_vblank_counter(struct drm_crtc *crtc);
@ -19,5 +20,6 @@ bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
int intel_get_crtc_scanline(struct intel_crtc *crtc);
void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc);
void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc);
void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state);
#endif /* __INTEL_VBLANK_H__ */

View File

@ -17,7 +17,6 @@
#include "intel_fb.h"
#include "intel_fbc.h"
#include "intel_psr.h"
#include "intel_sprite.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
#include "skl_watermark.h"

View File

@ -12,6 +12,7 @@
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_bw.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display.h"
#include "intel_display_power.h"
@ -704,6 +705,28 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
const struct skl_wm_level *result_prev,
struct skl_wm_level *result /* out */);
static unsigned int skl_wm_latency(struct drm_i915_private *i915, int level,
const struct skl_wm_params *wp)
{
unsigned int latency = i915->display.wm.skl_latency[level];
if (latency == 0)
return 0;
/*
* WaIncreaseLatencyIPCEnabled: kbl,cfl
* Display WA #1141: kbl,cfl
*/
if ((IS_KABYLAKE(i915) || IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) &&
skl_watermark_ipc_enabled(i915))
latency += 4;
if (skl_needs_memory_bw_wa(i915) && wp && wp->x_tiled)
latency += 15;
return latency;
}
static unsigned int
skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
int num_active)
@ -723,7 +746,7 @@ skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
drm_WARN_ON(&i915->drm, ret);
for (level = 0; level < i915->display.wm.num_levels; level++) {
unsigned int latency = i915->display.wm.skl_latency[level];
unsigned int latency = skl_wm_latency(i915, level, &wp);
skl_compute_plane_wm(crtc_state, plane, level, latency, &wp, &wm, &wm);
if (wm.min_ddb_alloc == U16_MAX)
@ -1839,17 +1862,6 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
return;
}
/*
* WaIncreaseLatencyIPCEnabled: kbl,cfl
* Display WA #1141: kbl,cfl
*/
if ((IS_KABYLAKE(i915) || IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) &&
skl_watermark_ipc_enabled(i915))
latency += 4;
if (skl_needs_memory_bw_wa(i915) && wp->x_tiled)
latency += 15;
method1 = skl_wm_method1(i915, wp->plane_pixel_rate,
wp->cpp, latency, wp->dbuf_block_size);
method2 = skl_wm_method2(wp->plane_pixel_rate,
@ -1976,7 +1988,7 @@ skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
for (level = 0; level < i915->display.wm.num_levels; level++) {
struct skl_wm_level *result = &levels[level];
unsigned int latency = i915->display.wm.skl_latency[level];
unsigned int latency = skl_wm_latency(i915, level, wm_params);
skl_compute_plane_wm(crtc_state, plane, level, latency,
wm_params, result_prev, result);
@ -1996,7 +2008,8 @@ static void tgl_compute_sagv_wm(const struct intel_crtc_state *crtc_state,
unsigned int latency = 0;
if (i915->display.sagv.block_time_us)
latency = i915->display.sagv.block_time_us + i915->display.wm.skl_latency[0];
latency = i915->display.sagv.block_time_us +
skl_wm_latency(i915, 0, wm_params);
skl_compute_plane_wm(crtc_state, plane, 0, latency,
wm_params, &levels[0],
@ -2188,6 +2201,119 @@ static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
return 0;
}
static bool
skl_is_vblank_too_short(const struct intel_crtc_state *crtc_state,
int wm0_lines, int latency)
{
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
/* FIXME missing scaler and DSC pre-fill time */
return crtc_state->framestart_delay +
intel_usecs_to_scanlines(adjusted_mode, latency) +
wm0_lines >
adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vblank_start;
}
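A worked example of the check above with made-up numbers, assuming intel_usecs_to_scanlines() is roughly DIV_ROUND_UP(usecs * crtc_clock_khz, 1000 * htotal):

/* Made-up numbers; DIV_ROUND_UP open-coded for a standalone build. */
#include <stdio.h>

int main(void)
{
	int framestart_delay = 1, wm0_lines = 4, latency_us = 20;
	int crtc_clock_khz = 148500, htotal = 2200;	/* 1080p60-like timings */
	int vtotal = 1125, vblank_start = 1084;

	int latency_lines = (latency_us * crtc_clock_khz + 1000 * htotal - 1) /
			    (1000 * htotal);
	int needed = framestart_delay + latency_lines + wm0_lines;
	int available = vtotal - vblank_start;

	printf("needed=%d lines, vblank=%d lines -> %s\n", needed, available,
	       needed > available ? "vblank too short" : "ok");
	return 0;
}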
static int skl_max_wm0_lines(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum plane_id plane_id;
int wm0_lines = 0;
for_each_plane_id_on_crtc(crtc, plane_id) {
const struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
/* FIXME what about !skl_wm_has_lines() platforms? */
wm0_lines = max_t(int, wm0_lines, wm->wm[0].lines);
}
return wm0_lines;
}
static int skl_max_wm_level_for_vblank(struct intel_crtc_state *crtc_state,
int wm0_lines)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
int level;
for (level = i915->display.wm.num_levels - 1; level >= 0; level--) {
int latency;
/* FIXME should we care about the latency w/a's? */
latency = skl_wm_latency(i915, level, NULL);
if (latency == 0)
continue;
/* FIXME is it correct to use 0 latency for wm0 here? */
if (level == 0)
latency = 0;
if (!skl_is_vblank_too_short(crtc_state, wm0_lines, latency))
return level;
}
return -EINVAL;
}
static int skl_wm_check_vblank(struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
int wm0_lines, level;
if (!crtc_state->hw.active)
return 0;
wm0_lines = skl_max_wm0_lines(crtc_state);
level = skl_max_wm_level_for_vblank(crtc_state, wm0_lines);
if (level < 0)
return level;
/*
* FIXME PSR needs to toggle LATENCY_REPORTING_REMOVED_PIPE_*
* based on whether we're limited by the vblank duration.
*
* FIXME also related to skl+ w/a 1136 (also unimplemented as of
* now) perhaps?
*/
for (level++; level < i915->display.wm.num_levels; level++) {
enum plane_id plane_id;
for_each_plane_id_on_crtc(crtc, plane_id) {
struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
/*
* FIXME just clear enable or flag the entire
* thing as bad via min_ddb_alloc=U16_MAX?
*/
wm->wm[level].enable = false;
wm->uv_wm[level].enable = false;
}
}
if (DISPLAY_VER(i915) >= 12 &&
i915->display.sagv.block_time_us &&
skl_is_vblank_too_short(crtc_state, wm0_lines,
i915->display.sagv.block_time_us)) {
enum plane_id plane_id;
for_each_plane_id_on_crtc(crtc, plane_id) {
struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
wm->sagv.wm0.enable = false;
wm->sagv.trans_wm.enable = false;
}
}
return 0;
}
static int skl_build_pipe_wm(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
@ -2217,7 +2343,7 @@ static int skl_build_pipe_wm(struct intel_atomic_state *state,
crtc_state->wm.skl.optimal = crtc_state->wm.skl.raw;
return 0;
return skl_wm_check_vblank(crtc_state);
}
static void skl_ddb_entry_write(struct drm_i915_private *i915,

View File

@ -1072,7 +1072,7 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
bpp = mipi_dsi_pixel_format_to_bpp(
pixel_format_from_register_bits(fmt));
pipe_config->pipe_bpp = bdw_get_pipemisc_bpp(crtc);
pipe_config->pipe_bpp = bdw_get_pipe_misc_bpp(crtc);
/* Enable frame timestamp based scanline reporting */
pipe_config->mode_flags |=

View File

@ -440,6 +440,8 @@
#define GSC_FW_LOAD GSC_INSTR(1, 0, 2)
#define HECI1_FW_LIMIT_VALID (1 << 31)
#define GSC_HECI_CMD_PKT GSC_INSTR(0, 0, 6)
/*
* Used to convert any address to canonical form.
* Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,

View File

@ -0,0 +1,109 @@
// SPDX-License-Identifier: MIT
/*
* Copyright © 2023 Intel Corporation
*/
#include "gt/intel_engine_pm.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_ring.h"
#include "intel_gsc_uc_heci_cmd_submit.h"
struct gsc_heci_pkt {
u64 addr_in;
u32 size_in;
u64 addr_out;
u32 size_out;
};
static int emit_gsc_heci_pkt(struct i915_request *rq, struct gsc_heci_pkt *pkt)
{
u32 *cs;
cs = intel_ring_begin(rq, 8);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = GSC_HECI_CMD_PKT;
*cs++ = lower_32_bits(pkt->addr_in);
*cs++ = upper_32_bits(pkt->addr_in);
*cs++ = pkt->size_in;
*cs++ = lower_32_bits(pkt->addr_out);
*cs++ = upper_32_bits(pkt->addr_out);
*cs++ = pkt->size_out;
*cs++ = 0;
intel_ring_advance(rq, cs);
return 0;
}
int intel_gsc_uc_heci_cmd_submit_packet(struct intel_gsc_uc *gsc, u64 addr_in,
u32 size_in, u64 addr_out,
u32 size_out)
{
struct intel_context *ce = gsc->ce;
struct i915_request *rq;
struct gsc_heci_pkt pkt = {
.addr_in = addr_in,
.size_in = size_in,
.addr_out = addr_out,
.size_out = size_out
};
int err;
if (!ce)
return -ENODEV;
rq = i915_request_create(ce);
if (IS_ERR(rq))
return PTR_ERR(rq);
if (ce->engine->emit_init_breadcrumb) {
err = ce->engine->emit_init_breadcrumb(rq);
if (err)
goto out_rq;
}
err = emit_gsc_heci_pkt(rq, &pkt);
if (err)
goto out_rq;
err = ce->engine->emit_flush(rq, 0);
out_rq:
i915_request_get(rq);
if (unlikely(err))
i915_request_set_error_once(rq, err);
i915_request_add(rq);
if (!err && i915_request_wait(rq, 0, msecs_to_jiffies(500)) < 0)
err = -ETIME;
i915_request_put(rq);
if (err)
drm_err(&gsc_uc_to_gt(gsc)->i915->drm,
"Request submission for GSC heci cmd failed (%d)\n",
err);
return err;
}
void intel_gsc_uc_heci_cmd_emit_mtl_header(struct intel_gsc_mtl_header *header,
u8 heci_client_id, u32 message_size,
u64 host_session_id)
{
host_session_id &= ~HOST_SESSION_MASK;
if (heci_client_id == HECI_MEADDRESS_PXP)
host_session_id |= HOST_SESSION_PXP_SINGLE;
header->validity_marker = GSC_HECI_VALIDITY_MARKER;
header->heci_client_id = heci_client_id;
header->host_session_handle = host_session_id;
header->header_version = MTL_GSC_HEADER_VERSION;
header->message_size = message_size;
}

View File

@ -0,0 +1,61 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2023 Intel Corporation
*/
#ifndef _INTEL_GSC_UC_HECI_CMD_SUBMIT_H_
#define _INTEL_GSC_UC_HECI_CMD_SUBMIT_H_
#include <linux/types.h>
struct intel_gsc_uc;
struct intel_gsc_mtl_header {
u32 validity_marker;
#define GSC_HECI_VALIDITY_MARKER 0xA578875A
u8 heci_client_id;
#define HECI_MEADDRESS_PXP 17
#define HECI_MEADDRESS_HDCP 18
u8 reserved1;
u16 header_version;
#define MTL_GSC_HEADER_VERSION 1
/*
* FW allows host to decide host_session handle
* as it sees fit.
* For traceability, select bits (60-63) are reserved
* to differentiate the caller-target subsystem:
* 0000 - HDCP
* 0001 - PXP Single Session
*/
u64 host_session_handle;
#define HOST_SESSION_MASK REG_GENMASK64(63, 60)
#define HOST_SESSION_PXP_SINGLE BIT_ULL(60)
u64 gsc_message_handle;
u32 message_size; /* lower 20 bits only, upper 12 are reserved */
/*
* Flags mask:
* Bit 0: Pending
* Bit 1: Session Cleanup;
* Bits 2-15: Flags
* Bits 16-31: Extension Size
* According to the internal spec, flags are either input or output;
* we distinguish them using an OUTFLAG or INFLAG prefix
*/
u32 flags;
#define GSC_OUTFLAG_MSG_PENDING 1
u32 status;
} __packed;
int intel_gsc_uc_heci_cmd_submit_packet(struct intel_gsc_uc *gsc,
u64 addr_in, u32 size_in,
u64 addr_out, u32 size_out);
void intel_gsc_uc_heci_cmd_emit_mtl_header(struct intel_gsc_mtl_header *header,
u8 heci_client_id, u32 message_size,
u64 host_session_id);
#endif
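As a rough, self-contained illustration of the header layout and the host_session_handle tag bits defined above, mirroring what intel_gsc_uc_heci_cmd_emit_mtl_header() does for a PXP client (the session id and message size below are made up for the example):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define GSC_HECI_VALIDITY_MARKER 0xA578875A
#define HECI_MEADDRESS_PXP 17
#define MTL_GSC_HEADER_VERSION 1
#define HOST_SESSION_MASK (0xfULL << 60)
#define HOST_SESSION_PXP_SINGLE (1ULL << 60)

struct gsc_mtl_header {
	uint32_t validity_marker;
	uint8_t heci_client_id;
	uint8_t reserved1;
	uint16_t header_version;
	uint64_t host_session_handle;
	uint64_t gsc_message_handle;
	uint32_t message_size;
	uint32_t flags;
	uint32_t status;
} __attribute__((packed));

int main(void)
{
	struct gsc_mtl_header hdr;
	uint64_t host_session_id = 0x1234;	/* made-up session id */

	/* Tag bits 63:60 select the PXP-single-session flavour, as above. */
	host_session_id &= ~HOST_SESSION_MASK;
	host_session_id |= HOST_SESSION_PXP_SINGLE;

	memset(&hdr, 0, sizeof(hdr));
	hdr.validity_marker = GSC_HECI_VALIDITY_MARKER;
	hdr.heci_client_id = HECI_MEADDRESS_PXP;
	hdr.host_session_handle = host_session_id;
	hdr.header_version = MTL_GSC_HEADER_VERSION;
	hdr.message_size = 128;	/* example payload size (lower 20 bits only) */

	printf("host_session_handle = 0x%016llx\n",
	       (unsigned long long)hdr.host_session_handle);
	return 0;
}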

View File

@ -535,7 +535,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
ret = i915_pcode_init(dev_priv);
if (ret)
goto err_msi;
goto err_opregion;
/*
* Fill the dram structure to get the system dram info. This will be
@ -556,6 +556,8 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
return 0;
err_opregion:
intel_opregion_cleanup(dev_priv);
err_msi:
if (pdev->msi_enabled)
pci_disable_msi(pdev);
@ -581,6 +583,8 @@ static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
i915_perf_fini(dev_priv);
intel_opregion_cleanup(dev_priv);
if (pdev->msi_enabled)
pci_disable_msi(pdev);

View File

@ -1794,9 +1794,11 @@
* GEN9 clock gating regs
*/
#define GEN9_CLKGATE_DIS_0 _MMIO(0x46530)
#define DARBF_GATING_DIS (1 << 27)
#define PWM2_GATING_DIS (1 << 14)
#define PWM1_GATING_DIS (1 << 13)
#define DARBF_GATING_DIS REG_BIT(27)
#define MTL_PIPEDMC_GATING_DIS_A REG_BIT(15)
#define MTL_PIPEDMC_GATING_DIS_B REG_BIT(14)
#define PWM2_GATING_DIS REG_BIT(14)
#define PWM1_GATING_DIS REG_BIT(13)
#define GEN9_CLKGATE_DIS_3 _MMIO(0x46538)
#define TGL_VRH_GATING_DIS REG_BIT(31)
@ -3495,36 +3497,38 @@
#define _PIPE_MISC_A 0x70030
#define _PIPE_MISC_B 0x71030
#define PIPEMISC_YUV420_ENABLE REG_BIT(27) /* glk+ */
#define PIPEMISC_YUV420_MODE_FULL_BLEND REG_BIT(26) /* glk+ */
#define PIPEMISC_HDR_MODE_PRECISION REG_BIT(23) /* icl+ */
#define PIPEMISC_OUTPUT_COLORSPACE_YUV REG_BIT(11)
#define PIPEMISC_PIXEL_ROUNDING_TRUNC REG_BIT(8) /* tgl+ */
#define PIPE_MISC_YUV420_ENABLE REG_BIT(27) /* glk+ */
#define PIPE_MISC_YUV420_MODE_FULL_BLEND REG_BIT(26) /* glk+ */
#define PIPE_MISC_HDR_MODE_PRECISION REG_BIT(23) /* icl+ */
#define PIPE_MISC_OUTPUT_COLORSPACE_YUV REG_BIT(11)
#define PIPE_MISC_PIXEL_ROUNDING_TRUNC REG_BIT(8) /* tgl+ */
/*
* For Display < 13, Bits 5-7 of PIPE MISC represent DITHER BPC with
* valid values of: 6, 8, 10 BPC.
* On ADLP+, bits 5-7 represent PORT OUTPUT BPC with valid values of:
* 6, 8, 10, 12 BPC.
*/
#define PIPEMISC_BPC_MASK REG_GENMASK(7, 5)
#define PIPEMISC_BPC_8 REG_FIELD_PREP(PIPEMISC_BPC_MASK, 0)
#define PIPEMISC_BPC_10 REG_FIELD_PREP(PIPEMISC_BPC_MASK, 1)
#define PIPEMISC_BPC_6 REG_FIELD_PREP(PIPEMISC_BPC_MASK, 2)
#define PIPEMISC_BPC_12_ADLP REG_FIELD_PREP(PIPEMISC_BPC_MASK, 4) /* adlp+ */
#define PIPEMISC_DITHER_ENABLE REG_BIT(4)
#define PIPEMISC_DITHER_TYPE_MASK REG_GENMASK(3, 2)
#define PIPEMISC_DITHER_TYPE_SP REG_FIELD_PREP(PIPEMISC_DITHER_TYPE_MASK, 0)
#define PIPEMISC_DITHER_TYPE_ST1 REG_FIELD_PREP(PIPEMISC_DITHER_TYPE_MASK, 1)
#define PIPEMISC_DITHER_TYPE_ST2 REG_FIELD_PREP(PIPEMISC_DITHER_TYPE_MASK, 2)
#define PIPEMISC_DITHER_TYPE_TEMP REG_FIELD_PREP(PIPEMISC_DITHER_TYPE_MASK, 3)
#define PIPEMISC(pipe) _MMIO_PIPE2(pipe, _PIPE_MISC_A)
#define PIPE_MISC_BPC_MASK REG_GENMASK(7, 5)
#define PIPE_MISC_BPC_8 REG_FIELD_PREP(PIPE_MISC_BPC_MASK, 0)
#define PIPE_MISC_BPC_10 REG_FIELD_PREP(PIPE_MISC_BPC_MASK, 1)
#define PIPE_MISC_BPC_6 REG_FIELD_PREP(PIPE_MISC_BPC_MASK, 2)
#define PIPE_MISC_BPC_12_ADLP REG_FIELD_PREP(PIPE_MISC_BPC_MASK, 4) /* adlp+ */
#define PIPE_MISC_DITHER_ENABLE REG_BIT(4)
#define PIPE_MISC_DITHER_TYPE_MASK REG_GENMASK(3, 2)
#define PIPE_MISC_DITHER_TYPE_SP REG_FIELD_PREP(PIPE_MISC_DITHER_TYPE_MASK, 0)
#define PIPE_MISC_DITHER_TYPE_ST1 REG_FIELD_PREP(PIPE_MISC_DITHER_TYPE_MASK, 1)
#define PIPE_MISC_DITHER_TYPE_ST2 REG_FIELD_PREP(PIPE_MISC_DITHER_TYPE_MASK, 2)
#define PIPE_MISC_DITHER_TYPE_TEMP REG_FIELD_PREP(PIPE_MISC_DITHER_TYPE_MASK, 3)
#define PIPE_MISC(pipe) _MMIO_PIPE(pipe, _PIPE_MISC_A, _PIPE_MISC_B)
#define _PIPE_MISC2_A 0x7002C
#define _PIPE_MISC2_B 0x7102C
#define PIPE_MISC2_BUBBLE_COUNTER_MASK REG_GENMASK(31, 24)
#define PIPE_MISC2_BUBBLE_COUNTER_SCALER_EN REG_FIELD_PREP(PIPE_MISC2_BUBBLE_COUNTER_MASK, 80)
#define PIPE_MISC2_BUBBLE_COUNTER_SCALER_DIS REG_FIELD_PREP(PIPE_MISC2_BUBBLE_COUNTER_MASK, 20)
#define PIPE_MISC2(pipe) _MMIO_PIPE2(pipe, _PIPE_MISC2_A)
#define PIPE_MISC2_FLIP_INFO_PLANE_SEL_MASK REG_GENMASK(2, 0) /* tgl+ */
#define PIPE_MISC2_FLIP_INFO_PLANE_SEL(plane_id) REG_FIELD_PREP(PIPE_MISC2_FLIP_INFO_PLANE_SEL_MASK, (plane_id))
#define PIPE_MISC2(pipe) _MMIO_PIPE(pipe, _PIPE_MISC2_A, _PIPE_MISC2_B)
/* Skylake+ pipe bottom (background) color */
#define _SKL_BOTTOM_COLOR_A 0x70034
@ -4390,6 +4394,7 @@
#define SP_CONST_ALPHA_ENABLE REG_BIT(31)
#define SP_CONST_ALPHA_MASK REG_GENMASK(7, 0)
#define SP_CONST_ALPHA(alpha) REG_FIELD_PREP(SP_CONST_ALPHA_MASK, (alpha))
#define _SPASURFLIVE (VLV_DISPLAY_BASE + 0x721ac)
#define _SPACLRC0 (VLV_DISPLAY_BASE + 0x721d0)
#define SP_CONTRAST_MASK REG_GENMASK(26, 18)
#define SP_CONTRAST(x) REG_FIELD_PREP(SP_CONTRAST_MASK, (x)) /* u3.6 */
@ -4413,6 +4418,7 @@
#define _SPBKEYMAXVAL (VLV_DISPLAY_BASE + 0x722a0)
#define _SPBTILEOFF (VLV_DISPLAY_BASE + 0x722a4)
#define _SPBCONSTALPHA (VLV_DISPLAY_BASE + 0x722a8)
#define _SPBSURFLIVE (VLV_DISPLAY_BASE + 0x722ac)
#define _SPBCLRC0 (VLV_DISPLAY_BASE + 0x722d0)
#define _SPBCLRC1 (VLV_DISPLAY_BASE + 0x722d4)
#define _SPBGAMC (VLV_DISPLAY_BASE + 0x722e0)
@ -4433,6 +4439,7 @@
#define SPKEYMAXVAL(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAKEYMAXVAL, _SPBKEYMAXVAL)
#define SPTILEOFF(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPATILEOFF, _SPBTILEOFF)
#define SPCONSTALPHA(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACONSTALPHA, _SPBCONSTALPHA)
#define SPSURFLIVE(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPASURFLIVE, _SPBSURFLIVE)
#define SPCLRC0(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACLRC0, _SPBCLRC0)
#define SPCLRC1(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACLRC1, _SPBCLRC1)
#define SPGAMC(pipe, plane_id, i) _MMIO(_VLV_SPR((pipe), (plane_id), _SPAGAMC, _SPBGAMC) + (5 - (i)) * 4) /* 6 x u0.10 */
@ -4584,10 +4591,13 @@
#define _PLANE_KEYVAL_2_A 0x70294
#define _PLANE_KEYMSK_1_A 0x70198
#define _PLANE_KEYMSK_2_A 0x70298
#define PLANE_KEYMSK_ALPHA_ENABLE (1 << 31)
#define PLANE_KEYMSK_ALPHA_ENABLE REG_BIT(31)
#define _PLANE_KEYMAX_1_A 0x701a0
#define _PLANE_KEYMAX_2_A 0x702a0
#define PLANE_KEYMAX_ALPHA(a) ((a) << 24)
#define PLANE_KEYMAX_ALPHA_MASK REG_GENMASK(31, 24)
#define PLANE_KEYMAX_ALPHA(a) REG_FIELD_PREP(PLANE_KEYMAX_ALPHA_MASK, (a))
#define _PLANE_SURFLIVE_1_A 0x701ac
#define _PLANE_SURFLIVE_2_A 0x702ac
#define _PLANE_CC_VAL_1_A 0x701b4
#define _PLANE_CC_VAL_2_A 0x702b4
#define _PLANE_AUX_DIST_1_A 0x701c0
@ -4772,6 +4782,13 @@
#define PLANE_KEYMAX(pipe, plane) \
_MMIO_PLANE(plane, _PLANE_KEYMAX_1(pipe), _PLANE_KEYMAX_2(pipe))
#define _PLANE_SURFLIVE_1_B 0x711ac
#define _PLANE_SURFLIVE_2_B 0x712ac
#define _PLANE_SURFLIVE_1(pipe) _PIPE(pipe, _PLANE_SURFLIVE_1_A, _PLANE_SURFLIVE_1_B)
#define _PLANE_SURFLIVE_2(pipe) _PIPE(pipe, _PLANE_SURFLIVE_2_A, _PLANE_SURFLIVE_2_B)
#define PLANE_SURFLIVE(pipe, plane) \
_MMIO_PLANE(plane, _PLANE_SURFLIVE_1(pipe), _PLANE_SURFLIVE_2(pipe))
#define _PLANE_BUF_CFG_1_B 0x7127c
#define _PLANE_BUF_CFG_2_B 0x7137c
/* skl+: 10 bits, icl+ 11 bits, adlp+ 12 bits */
@ -7232,6 +7249,8 @@ enum skl_power_gate {
#define DC_STATE_DISABLE 0
#define DC_STATE_EN_DC3CO REG_BIT(30)
#define DC_STATE_DC3CO_STATUS REG_BIT(29)
#define HOLD_PHY_CLKREQ_PG1_LATCH REG_BIT(21)
#define HOLD_PHY_PG1_LATCH REG_BIT(20)
#define DC_STATE_EN_UPTO_DC5 (1 << 0)
#define DC_STATE_EN_DC9 (1 << 3)
#define DC_STATE_EN_UPTO_DC6 (2 << 0)
@ -7541,9 +7560,29 @@ enum skl_power_gate {
#define GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_SHIFT 12
#define GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK (0xf << 12)
/* g4x+, except vlv/chv! */
#define _PIPE_FRMTMSTMP_A 0x70048
#define _PIPE_FRMTMSTMP_B 0x71048
#define PIPE_FRMTMSTMP(pipe) \
_MMIO_PIPE2(pipe, _PIPE_FRMTMSTMP_A)
_MMIO_PIPE(pipe, _PIPE_FRMTMSTMP_A, _PIPE_FRMTMSTMP_B)
/* g4x+, except vlv/chv! */
#define _PIPE_FLIPTMSTMP_A 0x7004C
#define _PIPE_FLIPTMSTMP_B 0x7104C
#define PIPE_FLIPTMSTMP(pipe) \
_MMIO_PIPE(pipe, _PIPE_FLIPTMSTMP_A, _PIPE_FLIPTMSTMP_B)
/* tgl+ */
#define _PIPE_FLIPDONETMSTMP_A 0x70054
#define _PIPE_FLIPDONETMSTMP_B 0x71054
#define PIPE_FLIPDONETIMSTMP(pipe) \
_MMIO_PIPE(pipe, _PIPE_FLIPDONETMSTMP_A, _PIPE_FLIPDONETMSTMP_B)
#define _VLV_PIPE_MSA_MISC_A 0x70048
#define VLV_PIPE_MSA_MISC(pipe) \
_MMIO_PIPE2(pipe, _VLV_PIPE_MSA_MISC_A)
#define VLV_MSA_MISC1_HW_ENABLE REG_BIT(31)
#define VLV_MSA_MISC1_SW_S3D_MASK REG_GENMASK(2, 0) /* MSA MISC1 3:1 */
#define GGC _MMIO(0x108040)
#define GMS_MASK REG_GENMASK(15, 8)
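
The PLANE_KEYMSK_ALPHA_ENABLE / PLANE_KEYMAX_ALPHA conversions above move from open-coded shifts to REG_BIT()/REG_GENMASK()/REG_FIELD_PREP(), which wrap the same bit arithmetic with additional compile-time checking. A standalone sketch of the equivalent packing, using open-coded GENMASK/FIELD_PREP-style helpers rather than the i915 macros:

#include <stdint.h>
#include <stdio.h>

/* Open-coded 32-bit GENMASK/FIELD_PREP equivalents for a standalone build. */
#define GENMASK32(h, l)		((~0u << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP32(mask, val)	(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

int main(void)
{
	uint32_t alpha_mask = GENMASK32(31, 24);		/* PLANE_KEYMAX_ALPHA_MASK */
	uint32_t old_style = (uint32_t)0xab << 24;		/* PLANE_KEYMAX_ALPHA(a) before */
	uint32_t new_style = FIELD_PREP32(alpha_mask, 0xab);	/* after the conversion */

	printf("mask=0x%08x old=0x%08x new=0x%08x\n", alpha_mask, old_style, new_style);
	return 0;
}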

View File

@ -789,9 +789,9 @@ static int iterate_bdw_plus_mmio(struct intel_gvt_mmio_table_iter *iter)
MMIO_RING_D(RING_REG);
#undef RING_REG
MMIO_D(PIPEMISC(PIPE_A));
MMIO_D(PIPEMISC(PIPE_B));
MMIO_D(PIPEMISC(PIPE_C));
MMIO_D(PIPE_MISC(PIPE_A));
MMIO_D(PIPE_MISC(PIPE_B));
MMIO_D(PIPE_MISC(PIPE_C));
MMIO_D(_MMIO(0x1c1d0));
MMIO_D(GEN6_MBCUNIT_SNPCR);
MMIO_D(GEN7_MISCCPCTL);

View File

@ -23,7 +23,7 @@
#include <linux/component.h>
#include <drm/drm_connector.h>
#include <drm/i915_component.h>
#include <drm/i915_mei_hdcp_interface.h>
#include <drm/i915_hdcp_interface.h>
#include "mei_hdcp.h"
@ -52,13 +52,13 @@ mei_hdcp_initiate_session(struct device *dev, struct hdcp_port_data *data,
session_init_in.header.api_version = HDCP_API_VERSION;
session_init_in.header.command_id = WIRED_INITIATE_HDCP2_SESSION;
session_init_in.header.status = ME_HDCP_STATUS_SUCCESS;
session_init_in.header.status = FW_HDCP_STATUS_SUCCESS;
session_init_in.header.buffer_len =
WIRED_CMD_BUF_LEN_INITIATE_HDCP2_SESSION_IN;
session_init_in.port.integrated_port_type = data->port_type;
session_init_in.port.physical_port = (u8)data->fw_ddi;
session_init_in.port.attached_transcoder = (u8)data->fw_tc;
session_init_in.port.physical_port = (u8)data->hdcp_ddi;
session_init_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
session_init_in.protocol = data->protocol;
byte = mei_cldev_send(cldev, (u8 *)&session_init_in,
@ -75,7 +75,7 @@ mei_hdcp_initiate_session(struct device *dev, struct hdcp_port_data *data,
return byte;
}
if (session_init_out.header.status != ME_HDCP_STATUS_SUCCESS) {
if (session_init_out.header.status != FW_HDCP_STATUS_SUCCESS) {
dev_dbg(dev, "ME cmd 0x%08X Failed. Status: 0x%X\n",
WIRED_INITIATE_HDCP2_SESSION,
session_init_out.header.status);
@ -122,13 +122,13 @@ mei_hdcp_verify_receiver_cert_prepare_km(struct device *dev,
verify_rxcert_in.header.api_version = HDCP_API_VERSION;
verify_rxcert_in.header.command_id = WIRED_VERIFY_RECEIVER_CERT;
verify_rxcert_in.header.status = ME_HDCP_STATUS_SUCCESS;
verify_rxcert_in.header.status = FW_HDCP_STATUS_SUCCESS;
verify_rxcert_in.header.buffer_len =
WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_IN;
verify_rxcert_in.port.integrated_port_type = data->port_type;
verify_rxcert_in.port.physical_port = (u8)data->fw_ddi;
verify_rxcert_in.port.attached_transcoder = (u8)data->fw_tc;
verify_rxcert_in.port.physical_port = (u8)data->hdcp_ddi;
verify_rxcert_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
verify_rxcert_in.cert_rx = rx_cert->cert_rx;
memcpy(verify_rxcert_in.r_rx, &rx_cert->r_rx, HDCP_2_2_RRX_LEN);
@ -148,7 +148,7 @@ mei_hdcp_verify_receiver_cert_prepare_km(struct device *dev,
return byte;
}
if (verify_rxcert_out.header.status != ME_HDCP_STATUS_SUCCESS) {
if (verify_rxcert_out.header.status != FW_HDCP_STATUS_SUCCESS) {
dev_dbg(dev, "ME cmd 0x%08X Failed. Status: 0x%X\n",
WIRED_VERIFY_RECEIVER_CERT,
verify_rxcert_out.header.status);
@ -194,12 +194,12 @@ mei_hdcp_verify_hprime(struct device *dev, struct hdcp_port_data *data,
send_hprime_in.header.api_version = HDCP_API_VERSION;
send_hprime_in.header.command_id = WIRED_AKE_SEND_HPRIME;
send_hprime_in.header.status = ME_HDCP_STATUS_SUCCESS;
send_hprime_in.header.status = FW_HDCP_STATUS_SUCCESS;
send_hprime_in.header.buffer_len = WIRED_CMD_BUF_LEN_AKE_SEND_HPRIME_IN;
send_hprime_in.port.integrated_port_type = data->port_type;
send_hprime_in.port.physical_port = (u8)data->fw_ddi;
send_hprime_in.port.attached_transcoder = (u8)data->fw_tc;
send_hprime_in.port.physical_port = (u8)data->hdcp_ddi;
send_hprime_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
memcpy(send_hprime_in.h_prime, rx_hprime->h_prime,
HDCP_2_2_H_PRIME_LEN);
@ -218,7 +218,7 @@ mei_hdcp_verify_hprime(struct device *dev, struct hdcp_port_data *data,
return byte;
}
if (send_hprime_out.header.status != ME_HDCP_STATUS_SUCCESS) {
if (send_hprime_out.header.status != FW_HDCP_STATUS_SUCCESS) {
dev_dbg(dev, "ME cmd 0x%08X Failed. Status: 0x%X\n",
WIRED_AKE_SEND_HPRIME, send_hprime_out.header.status);
return -EIO;
@ -251,13 +251,13 @@ mei_hdcp_store_pairing_info(struct device *dev, struct hdcp_port_data *data,
pairing_info_in.header.api_version = HDCP_API_VERSION;
pairing_info_in.header.command_id = WIRED_AKE_SEND_PAIRING_INFO;
pairing_info_in.header.status = ME_HDCP_STATUS_SUCCESS;
pairing_info_in.header.status = FW_HDCP_STATUS_SUCCESS;
pairing_info_in.header.buffer_len =
WIRED_CMD_BUF_LEN_SEND_PAIRING_INFO_IN;
pairing_info_in.port.integrated_port_type = data->port_type;
pairing_info_in.port.physical_port = (u8)data->fw_ddi;
pairing_info_in.port.attached_transcoder = (u8)data->fw_tc;
pairing_info_in.port.physical_port = (u8)data->hdcp_ddi;
pairing_info_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
memcpy(pairing_info_in.e_kh_km, pairing_info->e_kh_km,
HDCP_2_2_E_KH_KM_LEN);
@ -276,7 +276,7 @@ mei_hdcp_store_pairing_info(struct device *dev, struct hdcp_port_data *data,
return byte;
}
if (pairing_info_out.header.status != ME_HDCP_STATUS_SUCCESS) {
if (pairing_info_out.header.status != FW_HDCP_STATUS_SUCCESS) {
dev_dbg(dev, "ME cmd 0x%08X failed. Status: 0x%X\n",
WIRED_AKE_SEND_PAIRING_INFO,
pairing_info_out.header.status);
@ -311,12 +311,12 @@ mei_hdcp_initiate_locality_check(struct device *dev,
lc_init_in.header.api_version = HDCP_API_VERSION;
lc_init_in.header.command_id = WIRED_INIT_LOCALITY_CHECK;
lc_init_in.header.status = ME_HDCP_STATUS_SUCCESS;
lc_init_in.header.status = FW_HDCP_STATUS_SUCCESS;
lc_init_in.header.buffer_len = WIRED_CMD_BUF_LEN_INIT_LOCALITY_CHECK_IN;
lc_init_in.port.integrated_port_type = data->port_type;
lc_init_in.port.physical_port = (u8)data->fw_ddi;
lc_init_in.port.attached_transcoder = (u8)data->fw_tc;
lc_init_in.port.physical_port = (u8)data->hdcp_ddi;
lc_init_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
byte = mei_cldev_send(cldev, (u8 *)&lc_init_in, sizeof(lc_init_in));
if (byte < 0) {
@ -330,7 +330,7 @@ mei_hdcp_initiate_locality_check(struct device *dev,
return byte;
}
if (lc_init_out.header.status != ME_HDCP_STATUS_SUCCESS) {
if (lc_init_out.header.status != FW_HDCP_STATUS_SUCCESS) {
dev_dbg(dev, "ME cmd 0x%08X Failed. status: 0x%X\n",
WIRED_INIT_LOCALITY_CHECK, lc_init_out.header.status);
return -EIO;
@ -366,13 +366,13 @@ mei_hdcp_verify_lprime(struct device *dev, struct hdcp_port_data *data,
verify_lprime_in.header.api_version = HDCP_API_VERSION;
verify_lprime_in.header.command_id = WIRED_VALIDATE_LOCALITY;
verify_lprime_in.header.status = ME_HDCP_STATUS_SUCCESS;
verify_lprime_in.header.status = FW_HDCP_STATUS_SUCCESS;
verify_lprime_in.header.buffer_len =
WIRED_CMD_BUF_LEN_VALIDATE_LOCALITY_IN;
verify_lprime_in.port.integrated_port_type = data->port_type;
verify_lprime_in.port.physical_port = (u8)data->fw_ddi;
verify_lprime_in.port.attached_transcoder = (u8)data->fw_tc;
verify_lprime_in.port.physical_port = (u8)data->hdcp_ddi;
verify_lprime_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
memcpy(verify_lprime_in.l_prime, rx_lprime->l_prime,
HDCP_2_2_L_PRIME_LEN);
@ -391,7 +391,7 @@ mei_hdcp_verify_lprime(struct device *dev, struct hdcp_port_data *data,
return byte;
}
if (verify_lprime_out.header.status != ME_HDCP_STATUS_SUCCESS) {
if (verify_lprime_out.header.status != FW_HDCP_STATUS_SUCCESS) {
dev_dbg(dev, "ME cmd 0x%08X failed. status: 0x%X\n",
WIRED_VALIDATE_LOCALITY,
verify_lprime_out.header.status);
@ -425,12 +425,12 @@ static int mei_hdcp_get_session_key(struct device *dev,
get_skey_in.header.api_version = HDCP_API_VERSION;
get_skey_in.header.command_id = WIRED_GET_SESSION_KEY;
get_skey_in.header.status = ME_HDCP_STATUS_SUCCESS;
get_skey_in.header.status = FW_HDCP_STATUS_SUCCESS;
get_skey_in.header.buffer_len = WIRED_CMD_BUF_LEN_GET_SESSION_KEY_IN;
get_skey_in.port.integrated_port_type = data->port_type;
get_skey_in.port.physical_port = (u8)data->fw_ddi;
get_skey_in.port.attached_transcoder = (u8)data->fw_tc;
get_skey_in.port.physical_port = (u8)data->hdcp_ddi;
get_skey_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
byte = mei_cldev_send(cldev, (u8 *)&get_skey_in, sizeof(get_skey_in));
if (byte < 0) {
@ -445,7 +445,7 @@ static int mei_hdcp_get_session_key(struct device *dev,
return byte;
}
if (get_skey_out.header.status != ME_HDCP_STATUS_SUCCESS) {
if (get_skey_out.header.status != FW_HDCP_STATUS_SUCCESS) {
dev_dbg(dev, "ME cmd 0x%08X failed. status: 0x%X\n",
WIRED_GET_SESSION_KEY, get_skey_out.header.status);
return -EIO;
@ -489,13 +489,13 @@ mei_hdcp_repeater_check_flow_prepare_ack(struct device *dev,
verify_repeater_in.header.api_version = HDCP_API_VERSION;
verify_repeater_in.header.command_id = WIRED_VERIFY_REPEATER;
verify_repeater_in.header.status = ME_HDCP_STATUS_SUCCESS;
verify_repeater_in.header.status = FW_HDCP_STATUS_SUCCESS;
verify_repeater_in.header.buffer_len =
WIRED_CMD_BUF_LEN_VERIFY_REPEATER_IN;
verify_repeater_in.port.integrated_port_type = data->port_type;
verify_repeater_in.port.physical_port = (u8)data->fw_ddi;
verify_repeater_in.port.attached_transcoder = (u8)data->fw_tc;
verify_repeater_in.port.physical_port = (u8)data->hdcp_ddi;
verify_repeater_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
memcpy(verify_repeater_in.rx_info, rep_topology->rx_info,
HDCP_2_2_RXINFO_LEN);
@ -520,7 +520,7 @@ mei_hdcp_repeater_check_flow_prepare_ack(struct device *dev,
return byte;
}
if (verify_repeater_out.header.status != ME_HDCP_STATUS_SUCCESS) {
if (verify_repeater_out.header.status != FW_HDCP_STATUS_SUCCESS) {
dev_dbg(dev, "ME cmd 0x%08X failed. status: 0x%X\n",
WIRED_VERIFY_REPEATER,
verify_repeater_out.header.status);
@ -568,12 +568,12 @@ static int mei_hdcp_verify_mprime(struct device *dev,
verify_mprime_in->header.api_version = HDCP_API_VERSION;
verify_mprime_in->header.command_id = WIRED_REPEATER_AUTH_STREAM_REQ;
verify_mprime_in->header.status = ME_HDCP_STATUS_SUCCESS;
verify_mprime_in->header.status = FW_HDCP_STATUS_SUCCESS;
verify_mprime_in->header.buffer_len = cmd_size - sizeof(verify_mprime_in->header);
verify_mprime_in->port.integrated_port_type = data->port_type;
verify_mprime_in->port.physical_port = (u8)data->fw_ddi;
verify_mprime_in->port.attached_transcoder = (u8)data->fw_tc;
verify_mprime_in->port.physical_port = (u8)data->hdcp_ddi;
verify_mprime_in->port.attached_transcoder = (u8)data->hdcp_transcoder;
memcpy(verify_mprime_in->m_prime, stream_ready->m_prime, HDCP_2_2_MPRIME_LEN);
drm_hdcp_cpu_to_be24(verify_mprime_in->seq_num_m, data->seq_num_m);
@ -597,7 +597,7 @@ static int mei_hdcp_verify_mprime(struct device *dev,
return byte;
}
if (verify_mprime_out.header.status != ME_HDCP_STATUS_SUCCESS) {
if (verify_mprime_out.header.status != FW_HDCP_STATUS_SUCCESS) {
dev_dbg(dev, "ME cmd 0x%08X failed. status: 0x%X\n",
WIRED_REPEATER_AUTH_STREAM_REQ,
verify_mprime_out.header.status);
@ -630,12 +630,12 @@ static int mei_hdcp_enable_authentication(struct device *dev,
enable_auth_in.header.api_version = HDCP_API_VERSION;
enable_auth_in.header.command_id = WIRED_ENABLE_AUTH;
enable_auth_in.header.status = ME_HDCP_STATUS_SUCCESS;
enable_auth_in.header.status = FW_HDCP_STATUS_SUCCESS;
enable_auth_in.header.buffer_len = WIRED_CMD_BUF_LEN_ENABLE_AUTH_IN;
enable_auth_in.port.integrated_port_type = data->port_type;
enable_auth_in.port.physical_port = (u8)data->fw_ddi;
enable_auth_in.port.attached_transcoder = (u8)data->fw_tc;
enable_auth_in.port.physical_port = (u8)data->hdcp_ddi;
enable_auth_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
enable_auth_in.stream_type = data->streams[0].stream_type;
byte = mei_cldev_send(cldev, (u8 *)&enable_auth_in,
@ -652,7 +652,7 @@ static int mei_hdcp_enable_authentication(struct device *dev,
return byte;
}
if (enable_auth_out.header.status != ME_HDCP_STATUS_SUCCESS) {
if (enable_auth_out.header.status != FW_HDCP_STATUS_SUCCESS) {
dev_dbg(dev, "ME cmd 0x%08X failed. status: 0x%X\n",
WIRED_ENABLE_AUTH, enable_auth_out.header.status);
return -EIO;
@ -684,13 +684,13 @@ mei_hdcp_close_session(struct device *dev, struct hdcp_port_data *data)
session_close_in.header.api_version = HDCP_API_VERSION;
session_close_in.header.command_id = WIRED_CLOSE_SESSION;
session_close_in.header.status = ME_HDCP_STATUS_SUCCESS;
session_close_in.header.status = FW_HDCP_STATUS_SUCCESS;
session_close_in.header.buffer_len =
WIRED_CMD_BUF_LEN_CLOSE_SESSION_IN;
session_close_in.port.integrated_port_type = data->port_type;
session_close_in.port.physical_port = (u8)data->fw_ddi;
session_close_in.port.attached_transcoder = (u8)data->fw_tc;
session_close_in.port.physical_port = (u8)data->hdcp_ddi;
session_close_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
byte = mei_cldev_send(cldev, (u8 *)&session_close_in,
sizeof(session_close_in));
@ -706,7 +706,7 @@ mei_hdcp_close_session(struct device *dev, struct hdcp_port_data *data)
return byte;
}
if (session_close_out.header.status != ME_HDCP_STATUS_SUCCESS) {
if (session_close_out.header.status != FW_HDCP_STATUS_SUCCESS) {
dev_dbg(dev, "Session Close Failed. status: 0x%X\n",
session_close_out.header.status);
return -EIO;
@ -715,7 +715,7 @@ mei_hdcp_close_session(struct device *dev, struct hdcp_port_data *data)
return 0;
}
static const struct i915_hdcp_component_ops mei_hdcp_ops = {
static const struct i915_hdcp_ops mei_hdcp_ops = {
.owner = THIS_MODULE,
.initiate_hdcp2_session = mei_hdcp_initiate_session,
.verify_receiver_cert_prepare_km =
@ -735,13 +735,12 @@ static const struct i915_hdcp_component_ops mei_hdcp_ops = {
static int mei_component_master_bind(struct device *dev)
{
struct mei_cl_device *cldev = to_mei_cl_device(dev);
struct i915_hdcp_comp_master *comp_master =
mei_cldev_get_drvdata(cldev);
struct i915_hdcp_master *comp_master = mei_cldev_get_drvdata(cldev);
int ret;
dev_dbg(dev, "%s\n", __func__);
comp_master->ops = &mei_hdcp_ops;
comp_master->mei_dev = dev;
comp_master->hdcp_dev = dev;
ret = component_bind_all(dev, comp_master);
if (ret < 0)
return ret;
@ -752,8 +751,7 @@ static int mei_component_master_bind(struct device *dev)
static void mei_component_master_unbind(struct device *dev)
{
struct mei_cl_device *cldev = to_mei_cl_device(dev);
struct i915_hdcp_comp_master *comp_master =
mei_cldev_get_drvdata(cldev);
struct i915_hdcp_master *comp_master = mei_cldev_get_drvdata(cldev);
dev_dbg(dev, "%s\n", __func__);
component_unbind_all(dev, comp_master);
@ -801,7 +799,7 @@ static int mei_hdcp_component_match(struct device *dev, int subcomponent,
static int mei_hdcp_probe(struct mei_cl_device *cldev,
const struct mei_cl_device_id *id)
{
struct i915_hdcp_comp_master *comp_master;
struct i915_hdcp_master *comp_master;
struct component_match *master_match;
int ret;
@ -846,8 +844,7 @@ static int mei_hdcp_probe(struct mei_cl_device *cldev,
static void mei_hdcp_remove(struct mei_cl_device *cldev)
{
struct i915_hdcp_comp_master *comp_master =
mei_cldev_get_drvdata(cldev);
struct i915_hdcp_master *comp_master = mei_cldev_get_drvdata(cldev);
int ret;
component_master_del(&cldev->dev, &mei_component_master_ops);

View File

@ -11,358 +11,4 @@
#include <drm/display/drm_hdcp.h>
/* me_hdcp_status: Enumeration of all HDCP Status Codes */
enum me_hdcp_status {
ME_HDCP_STATUS_SUCCESS = 0x0000,
/* WiDi Generic Status Codes */
ME_HDCP_STATUS_INTERNAL_ERROR = 0x1000,
ME_HDCP_STATUS_UNKNOWN_ERROR = 0x1001,
ME_HDCP_STATUS_INCORRECT_API_VERSION = 0x1002,
ME_HDCP_STATUS_INVALID_FUNCTION = 0x1003,
ME_HDCP_STATUS_INVALID_BUFFER_LENGTH = 0x1004,
ME_HDCP_STATUS_INVALID_PARAMS = 0x1005,
ME_HDCP_STATUS_AUTHENTICATION_FAILED = 0x1006,
/* WiDi Status Codes */
ME_HDCP_INVALID_SESSION_STATE = 0x6000,
ME_HDCP_SRM_FRAGMENT_UNEXPECTED = 0x6001,
ME_HDCP_SRM_INVALID_LENGTH = 0x6002,
ME_HDCP_SRM_FRAGMENT_OFFSET_INVALID = 0x6003,
ME_HDCP_SRM_VERIFICATION_FAILED = 0x6004,
ME_HDCP_SRM_VERSION_TOO_OLD = 0x6005,
ME_HDCP_RX_CERT_VERIFICATION_FAILED = 0x6006,
ME_HDCP_RX_REVOKED = 0x6007,
ME_HDCP_H_VERIFICATION_FAILED = 0x6008,
ME_HDCP_REPEATER_CHECK_UNEXPECTED = 0x6009,
ME_HDCP_TOPOLOGY_MAX_EXCEEDED = 0x600A,
ME_HDCP_V_VERIFICATION_FAILED = 0x600B,
ME_HDCP_L_VERIFICATION_FAILED = 0x600C,
ME_HDCP_STREAM_KEY_ALLOC_FAILED = 0x600D,
ME_HDCP_BASE_KEY_RESET_FAILED = 0x600E,
ME_HDCP_NONCE_GENERATION_FAILED = 0x600F,
ME_HDCP_STATUS_INVALID_E_KEY_STATE = 0x6010,
ME_HDCP_STATUS_INVALID_CS_ICV = 0x6011,
ME_HDCP_STATUS_INVALID_KB_KEY_STATE = 0x6012,
ME_HDCP_STATUS_INVALID_PAVP_MODE_ICV = 0x6013,
ME_HDCP_STATUS_INVALID_PAVP_MODE = 0x6014,
ME_HDCP_STATUS_LC_MAX_ATTEMPTS = 0x6015,
/* New status for HDCP 2.1 */
ME_HDCP_STATUS_MISMATCH_IN_M = 0x6016,
/* New status code for HDCP 2.2 Rx */
ME_HDCP_STATUS_RX_PROV_NOT_ALLOWED = 0x6017,
ME_HDCP_STATUS_RX_PROV_WRONG_SUBJECT = 0x6018,
ME_HDCP_RX_NEEDS_PROVISIONING = 0x6019,
ME_HDCP_BKSV_ICV_AUTH_FAILED = 0x6020,
ME_HDCP_STATUS_INVALID_STREAM_ID = 0x6021,
ME_HDCP_STATUS_CHAIN_NOT_INITIALIZED = 0x6022,
ME_HDCP_FAIL_NOT_EXPECTED = 0x6023,
ME_HDCP_FAIL_HDCP_OFF = 0x6024,
ME_HDCP_FAIL_INVALID_PAVP_MEMORY_MODE = 0x6025,
ME_HDCP_FAIL_AES_ECB_FAILURE = 0x6026,
ME_HDCP_FEATURE_NOT_SUPPORTED = 0x6027,
ME_HDCP_DMA_READ_ERROR = 0x6028,
ME_HDCP_DMA_WRITE_ERROR = 0x6029,
ME_HDCP_FAIL_INVALID_PACKET_SIZE = 0x6030,
ME_HDCP_H264_PARSING_ERROR = 0x6031,
ME_HDCP_HDCP2_ERRATA_VIDEO_VIOLATION = 0x6032,
ME_HDCP_HDCP2_ERRATA_AUDIO_VIOLATION = 0x6033,
ME_HDCP_TX_ACTIVE_ERROR = 0x6034,
ME_HDCP_MODE_CHANGE_ERROR = 0x6035,
ME_HDCP_STREAM_TYPE_ERROR = 0x6036,
ME_HDCP_STREAM_MANAGE_NOT_POSSIBLE = 0x6037,
ME_HDCP_STATUS_PORT_INVALID_COMMAND = 0x6038,
ME_HDCP_STATUS_UNSUPPORTED_PROTOCOL = 0x6039,
ME_HDCP_STATUS_INVALID_PORT_INDEX = 0x603a,
ME_HDCP_STATUS_TX_AUTH_NEEDED = 0x603b,
ME_HDCP_STATUS_NOT_INTEGRATED_PORT = 0x603c,
ME_HDCP_STATUS_SESSION_MAX_REACHED = 0x603d,
/* hdcp capable bit is not set in rx_caps(error is unique to DP) */
ME_HDCP_STATUS_NOT_HDCP_CAPABLE = 0x6041,
ME_HDCP_STATUS_INVALID_STREAM_COUNT = 0x6042,
};
#define HDCP_API_VERSION 0x00010000
#define HDCP_M_LEN 16
#define HDCP_KH_LEN 16
/* Payload Buffer size(Excluding Header) for CMDs and corresponding response */
/* Wired_Tx_AKE */
#define WIRED_CMD_BUF_LEN_INITIATE_HDCP2_SESSION_IN (4 + 1)
#define WIRED_CMD_BUF_LEN_INITIATE_HDCP2_SESSION_OUT (4 + 8 + 3)
#define WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_IN (4 + 522 + 8 + 3)
#define WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_MIN_OUT (4 + 1 + 3 + 16 + 16)
#define WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_MAX_OUT (4 + 1 + 3 + 128)
#define WIRED_CMD_BUF_LEN_AKE_SEND_HPRIME_IN (4 + 32)
#define WIRED_CMD_BUF_LEN_AKE_SEND_HPRIME_OUT (4)
#define WIRED_CMD_BUF_LEN_SEND_PAIRING_INFO_IN (4 + 16)
#define WIRED_CMD_BUF_LEN_SEND_PAIRING_INFO_OUT (4)
#define WIRED_CMD_BUF_LEN_CLOSE_SESSION_IN (4)
#define WIRED_CMD_BUF_LEN_CLOSE_SESSION_OUT (4)
/* Wired_Tx_LC */
#define WIRED_CMD_BUF_LEN_INIT_LOCALITY_CHECK_IN (4)
#define WIRED_CMD_BUF_LEN_INIT_LOCALITY_CHECK_OUT (4 + 8)
#define WIRED_CMD_BUF_LEN_VALIDATE_LOCALITY_IN (4 + 32)
#define WIRED_CMD_BUF_LEN_VALIDATE_LOCALITY_OUT (4)
/* Wired_Tx_SKE */
#define WIRED_CMD_BUF_LEN_GET_SESSION_KEY_IN (4)
#define WIRED_CMD_BUF_LEN_GET_SESSION_KEY_OUT (4 + 16 + 8)
/* Wired_Tx_SKE */
#define WIRED_CMD_BUF_LEN_ENABLE_AUTH_IN (4 + 1)
#define WIRED_CMD_BUF_LEN_ENABLE_AUTH_OUT (4)
/* Wired_Tx_Repeater */
#define WIRED_CMD_BUF_LEN_VERIFY_REPEATER_IN (4 + 2 + 3 + 16 + 155)
#define WIRED_CMD_BUF_LEN_VERIFY_REPEATER_OUT (4 + 1 + 16)
#define WIRED_CMD_BUF_LEN_REPEATER_AUTH_STREAM_REQ_MIN_IN (4 + 3 + \
32 + 2 + 2)
#define WIRED_CMD_BUF_LEN_REPEATER_AUTH_STREAM_REQ_OUT (4)
/* hdcp_command_id: Enumeration of all WIRED HDCP Command IDs */
enum hdcp_command_id {
_WIDI_COMMAND_BASE = 0x00030000,
WIDI_INITIATE_HDCP2_SESSION = _WIDI_COMMAND_BASE,
HDCP_GET_SRM_STATUS,
HDCP_SEND_SRM_FRAGMENT,
/* The wired HDCP Tx commands */
_WIRED_COMMAND_BASE = 0x00031000,
WIRED_INITIATE_HDCP2_SESSION = _WIRED_COMMAND_BASE,
WIRED_VERIFY_RECEIVER_CERT,
WIRED_AKE_SEND_HPRIME,
WIRED_AKE_SEND_PAIRING_INFO,
WIRED_INIT_LOCALITY_CHECK,
WIRED_VALIDATE_LOCALITY,
WIRED_GET_SESSION_KEY,
WIRED_ENABLE_AUTH,
WIRED_VERIFY_REPEATER,
WIRED_REPEATER_AUTH_STREAM_REQ,
WIRED_CLOSE_SESSION,
_WIRED_COMMANDS_COUNT,
};
union encrypted_buff {
u8 e_kpub_km[HDCP_2_2_E_KPUB_KM_LEN];
u8 e_kh_km_m[HDCP_2_2_E_KH_KM_M_LEN];
struct {
u8 e_kh_km[HDCP_KH_LEN];
u8 m[HDCP_M_LEN];
} __packed;
};
/* HDCP HECI message header. All header values are little endian. */
struct hdcp_cmd_header {
u32 api_version;
u32 command_id;
enum me_hdcp_status status;
/* Length of the HECI message (excluding the header) */
u32 buffer_len;
} __packed;
/* Empty command request or response. No data follows the header. */
struct hdcp_cmd_no_data {
struct hdcp_cmd_header header;
} __packed;
/* Uniquely identifies the hdcp port being addressed for a given command. */
struct hdcp_port_id {
u8 integrated_port_type;
/* physical_port is used until Gen11.5. Must be zero for Gen11.5+ */
u8 physical_port;
/* attached_transcoder is for Gen11.5+. Set to zero for <Gen11.5 */
u8 attached_transcoder;
u8 reserved;
} __packed;
/*
* Data structures for integrated wired HDCP2 Tx in
* support of the AKE protocol
*/
/* HECI struct for integrated wired HDCP Tx session initiation. */
struct wired_cmd_initiate_hdcp2_session_in {
struct hdcp_cmd_header header;
struct hdcp_port_id port;
u8 protocol; /* for HDMI vs DP */
} __packed;
struct wired_cmd_initiate_hdcp2_session_out {
struct hdcp_cmd_header header;
struct hdcp_port_id port;
u8 r_tx[HDCP_2_2_RTX_LEN];
struct hdcp2_tx_caps tx_caps;
} __packed;
/* HECI struct for ending an integrated wired HDCP Tx session. */
struct wired_cmd_close_session_in {
struct hdcp_cmd_header header;
struct hdcp_port_id port;
} __packed;
struct wired_cmd_close_session_out {
struct hdcp_cmd_header header;
struct hdcp_port_id port;
} __packed;
/* HECI struct for integrated wired HDCP Tx Rx Cert verification. */
struct wired_cmd_verify_receiver_cert_in {
struct hdcp_cmd_header header;
struct hdcp_port_id port;
struct hdcp2_cert_rx cert_rx;
u8 r_rx[HDCP_2_2_RRX_LEN];
u8 rx_caps[HDCP_2_2_RXCAPS_LEN];
} __packed;
struct wired_cmd_verify_receiver_cert_out {
struct hdcp_cmd_header header;
struct hdcp_port_id port;
u8 km_stored;
u8 reserved[3];
union encrypted_buff ekm_buff;
} __packed;
/* HECI struct for verification of Rx's Hprime in a HDCP Tx session */
struct wired_cmd_ake_send_hprime_in {
struct hdcp_cmd_header header;
struct hdcp_port_id port;
u8 h_prime[HDCP_2_2_H_PRIME_LEN];
} __packed;
struct wired_cmd_ake_send_hprime_out {
struct hdcp_cmd_header header;
struct hdcp_port_id port;
} __packed;
/*
* HECI struct for sending in AKE pairing data generated by the Rx in an
* integrated wired HDCP Tx session.
*/
struct wired_cmd_ake_send_pairing_info_in {
struct hdcp_cmd_header header;
struct hdcp_port_id port;
u8 e_kh_km[HDCP_2_2_E_KH_KM_LEN];
} __packed;
struct wired_cmd_ake_send_pairing_info_out {
struct hdcp_cmd_header header;
struct hdcp_port_id port;
} __packed;
/* Data structures for integrated wired HDCP2 Tx in support of the LC protocol*/
/*
* HECI struct for initiating locality check with an
* integrated wired HDCP Tx session.
*/
struct wired_cmd_init_locality_check_in {
struct hdcp_cmd_header header;
struct hdcp_port_id port;
} __packed;
struct wired_cmd_init_locality_check_out {
struct hdcp_cmd_header header;
struct hdcp_port_id port;
u8 r_n[HDCP_2_2_RN_LEN];
} __packed;
/*
* HECI struct for validating an Rx's LPrime value in an
* integrated wired HDCP Tx session.
*/
struct wired_cmd_validate_locality_in {
struct hdcp_cmd_header header;
struct hdcp_port_id port;
u8 l_prime[HDCP_2_2_L_PRIME_LEN];
} __packed;
struct wired_cmd_validate_locality_out {
struct hdcp_cmd_header header;
struct hdcp_port_id port;
} __packed;
/*
* Data structures for integrated wired HDCP2 Tx in support of the
* SKE protocol
*/
/* HECI struct for creating session key */
struct wired_cmd_get_session_key_in {
struct hdcp_cmd_header header;
struct hdcp_port_id port;
} __packed;
struct wired_cmd_get_session_key_out {
struct hdcp_cmd_header header;
struct hdcp_port_id port;
u8 e_dkey_ks[HDCP_2_2_E_DKEY_KS_LEN];
u8 r_iv[HDCP_2_2_RIV_LEN];
} __packed;
/* HECI struct for the Tx enable authentication command */
struct wired_cmd_enable_auth_in {
struct hdcp_cmd_header header;
struct hdcp_port_id port;
u8 stream_type;
} __packed;
struct wired_cmd_enable_auth_out {
struct hdcp_cmd_header header;
struct hdcp_port_id port;
} __packed;
/*
* Data structures for integrated wired HDCP2 Tx in support of
* the repeater protocols
*/
/*
* HECI struct for verifying the downstream repeater's HDCP topology in an
* integrated wired HDCP Tx session.
*/
struct wired_cmd_verify_repeater_in {
struct hdcp_cmd_header header;
struct hdcp_port_id port;
u8 rx_info[HDCP_2_2_RXINFO_LEN];
u8 seq_num_v[HDCP_2_2_SEQ_NUM_LEN];
u8 v_prime[HDCP_2_2_V_PRIME_HALF_LEN];
u8 receiver_ids[HDCP_2_2_RECEIVER_IDS_MAX_LEN];
} __packed;
struct wired_cmd_verify_repeater_out {
struct hdcp_cmd_header header;
struct hdcp_port_id port;
u8 content_type_supported;
u8 v[HDCP_2_2_V_PRIME_HALF_LEN];
} __packed;
/*
* HECI struct in support of stream management in an
* integrated wired HDCP Tx session.
*/
struct wired_cmd_repeater_auth_stream_req_in {
struct hdcp_cmd_header header;
struct hdcp_port_id port;
u8 seq_num_m[HDCP_2_2_SEQ_NUM_LEN];
u8 m_prime[HDCP_2_2_MPRIME_LEN];
__be16 k;
struct hdcp2_streamid_type streams[];
} __packed;
struct wired_cmd_repeater_auth_stream_req_out {
struct hdcp_cmd_header header;
struct hdcp_port_id port;
} __packed;
#endif /* __MEI_HDCP_H__ */

View File

@ -692,6 +692,9 @@
# define DP_FEC_LANE_2_SELECT (2 << 4)
# define DP_FEC_LANE_3_SELECT (3 << 4)
#define DP_SDP_ERROR_DETECTION_CONFIGURATION 0x121 /* DP 2.0 E11 */
#define DP_SDP_CRC16_128B132B_EN BIT(0)
#define DP_AUX_FRAME_SYNC_VALUE 0x15c /* eDP 1.4 */
# define DP_AUX_FRAME_SYNC_VALID (1 << 0)
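For context only, a minimal sketch of how a source driver could program the new DPCD register on a 128b/132b link; the helper name configure_sdp_crc16() and its parameters are illustrative and not part of this patch:

#include <drm/display/drm_dp.h>
#include <drm/display/drm_dp_helper.h>

/* Sketch: request CRC16 coverage for SDPs on a 128b/132b link by writing
 * DP_SDP_ERROR_DETECTION_CONFIGURATION over AUX. Returns 0 on success or a
 * negative errno from the AUX transfer.
 */
static int configure_sdp_crc16(struct drm_dp_aux *aux, bool enable)
{
	u8 val = enable ? DP_SDP_CRC16_128B132B_EN : 0;
	ssize_t ret;

	ret = drm_dp_dpcd_writeb(aux, DP_SDP_ERROR_DETECTION_CONFIGURATION, val);

	return ret < 0 ? ret : 0;
}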

View File

@ -0,0 +1,539 @@
/* SPDX-License-Identifier: (GPL-2.0+) */
/*
* Copyright © 2017-2019 Intel Corporation
*
* Authors:
* Ramalingam C <ramalingam.c@intel.com>
*/
#ifndef _I915_HDCP_INTERFACE_H_
#define _I915_HDCP_INTERFACE_H_
#include <linux/mutex.h>
#include <linux/device.h>
#include <drm/display/drm_hdcp.h>
/**
* enum hdcp_port_type - HDCP port implementation type defined by ME/GSC FW
* @HDCP_PORT_TYPE_INVALID: Invalid hdcp port type
* @HDCP_PORT_TYPE_INTEGRATED: In-Host HDCP2.x port
* @HDCP_PORT_TYPE_LSPCON: HDCP2.2 discrete wired Tx port with LSPCON
* (HDMI 2.0) solution
* @HDCP_PORT_TYPE_CPDP: HDCP2.2 discrete wired Tx port using the CPDP (DP 1.3)
* solution
*/
enum hdcp_port_type {
HDCP_PORT_TYPE_INVALID,
HDCP_PORT_TYPE_INTEGRATED,
HDCP_PORT_TYPE_LSPCON,
HDCP_PORT_TYPE_CPDP
};
/**
* enum hdcp_wired_protocol - HDCP adaptation used on the port
* @HDCP_PROTOCOL_INVALID: Invalid HDCP adaptation protocol
* @HDCP_PROTOCOL_HDMI: HDMI adaptation of HDCP used on the port
* @HDCP_PROTOCOL_DP: DP adaptation of HDCP used on the port
*/
enum hdcp_wired_protocol {
HDCP_PROTOCOL_INVALID,
HDCP_PROTOCOL_HDMI,
HDCP_PROTOCOL_DP
};
enum hdcp_ddi {
HDCP_DDI_INVALID_PORT = 0x0,
HDCP_DDI_B = 1,
HDCP_DDI_C,
HDCP_DDI_D,
HDCP_DDI_E,
HDCP_DDI_F,
HDCP_DDI_A = 7,
HDCP_DDI_RANGE_END = HDCP_DDI_A,
};
/**
* enum hdcp_transcoder - ME/GSC Firmware defined index for transcoders
* @HDCP_INVALID_TRANSCODER: Index for Invalid transcoder
* @HDCP_TRANSCODER_EDP: Index for EDP Transcoder
* @HDCP_TRANSCODER_DSI0: Index for DSI0 Transcoder
* @HDCP_TRANSCODER_DSI1: Index for DSI1 Transcoder
* @HDCP_TRANSCODER_A: Index for Transcoder A
* @HDCP_TRANSCODER_B: Index for Transcoder B
* @HDCP_TRANSCODER_C: Index for Transcoder C
* @HDCP_TRANSCODER_D: Index for Transcoder D
*/
enum hdcp_transcoder {
HDCP_INVALID_TRANSCODER = 0x00,
HDCP_TRANSCODER_EDP,
HDCP_TRANSCODER_DSI0,
HDCP_TRANSCODER_DSI1,
HDCP_TRANSCODER_A = 0x10,
HDCP_TRANSCODER_B,
HDCP_TRANSCODER_C,
HDCP_TRANSCODER_D
};
/**
* struct hdcp_port_data - intel specific HDCP port data
* @hdcp_ddi: ddi index as per ME/GSC FW
* @hdcp_transcoder: transcoder index as per ME/GSC FW
* @port_type: HDCP port type as per ME/GSC FW classification
* @protocol: HDCP adaptation as per ME/GSC FW
* @k: No of streams transmitted on a port. Only on DP MST this is != 1
* @seq_num_m: Count of RepeaterAuth_Stream_Manage msg propagated.
* Initialized to 0 on AKE_INIT. Incremented after every successful
* transmission of RepeaterAuth_Stream_Manage message. When it rolls
* over re-Auth has to be triggered.
* @streams: struct hdcp2_streamid_type[k]. Defines the type and id for the
* streams
*/
struct hdcp_port_data {
enum hdcp_ddi hdcp_ddi;
enum hdcp_transcoder hdcp_transcoder;
u8 port_type;
u8 protocol;
u16 k;
u32 seq_num_m;
struct hdcp2_streamid_type *streams;
};
/**
* struct i915_hdcp_ops - ops for HDCP2.2 services.
* @owner: Module providing the ops
* @initiate_hdcp2_session: Initiate a Wired HDCP2.2 Tx Session.
* And Prepare AKE_Init.
* @verify_receiver_cert_prepare_km: Verify the Receiver Certificate
* AKE_Send_Cert and prepare
AKE_Stored_Km/AKE_No_Stored_Km
* @verify_hprime: Verify AKE_Send_H_prime
* @store_pairing_info: Store pairing info received
* @initiate_locality_check: Prepare LC_Init
* @verify_lprime: Verify lprime
* @get_session_key: Prepare SKE_Send_Eks
* @repeater_check_flow_prepare_ack: Validate the Downstream topology
* and prepare rep_ack
* @verify_mprime: Verify mprime
* @enable_hdcp_authentication: Mark a port as authenticated.
* @close_hdcp_session: Close the Wired HDCP Tx session per port.
* This also disables the authenticated state of the port.
*/
struct i915_hdcp_ops {
/**
* @owner: hdcp module
*/
struct module *owner;
int (*initiate_hdcp2_session)(struct device *dev,
struct hdcp_port_data *data,
struct hdcp2_ake_init *ake_data);
int (*verify_receiver_cert_prepare_km)(struct device *dev,
struct hdcp_port_data *data,
struct hdcp2_ake_send_cert
*rx_cert,
bool *km_stored,
struct hdcp2_ake_no_stored_km
*ek_pub_km,
size_t *msg_sz);
int (*verify_hprime)(struct device *dev,
struct hdcp_port_data *data,
struct hdcp2_ake_send_hprime *rx_hprime);
int (*store_pairing_info)(struct device *dev,
struct hdcp_port_data *data,
struct hdcp2_ake_send_pairing_info
*pairing_info);
int (*initiate_locality_check)(struct device *dev,
struct hdcp_port_data *data,
struct hdcp2_lc_init *lc_init_data);
int (*verify_lprime)(struct device *dev,
struct hdcp_port_data *data,
struct hdcp2_lc_send_lprime *rx_lprime);
int (*get_session_key)(struct device *dev,
struct hdcp_port_data *data,
struct hdcp2_ske_send_eks *ske_data);
int (*repeater_check_flow_prepare_ack)(struct device *dev,
struct hdcp_port_data *data,
struct hdcp2_rep_send_receiverid_list
*rep_topology,
struct hdcp2_rep_send_ack
*rep_send_ack);
int (*verify_mprime)(struct device *dev,
struct hdcp_port_data *data,
struct hdcp2_rep_stream_ready *stream_ready);
int (*enable_hdcp_authentication)(struct device *dev,
struct hdcp_port_data *data);
int (*close_hdcp_session)(struct device *dev,
struct hdcp_port_data *data);
};
/**
* struct i915_hdcp_master - Used for communication between i915
* and hdcp drivers for the HDCP2.2 services
* @hdcp_dev: device that provides the HDCP2.2 service from the MEI bus.
* @ops: Ops implemented by the hdcp driver or intel_hdcp_gsc, used by the i915 driver.
*/
struct i915_hdcp_master {
struct device *hdcp_dev;
const struct i915_hdcp_ops *ops;
/* To protect the above members. */
struct mutex mutex;
};
/* fw_hdcp_status: Enumeration of all HDCP Status Codes */
enum fw_hdcp_status {
FW_HDCP_STATUS_SUCCESS = 0x0000,
/* WiDi Generic Status Codes */
FW_HDCP_STATUS_INTERNAL_ERROR = 0x1000,
FW_HDCP_STATUS_UNKNOWN_ERROR = 0x1001,
FW_HDCP_STATUS_INCORRECT_API_VERSION = 0x1002,
FW_HDCP_STATUS_INVALID_FUNCTION = 0x1003,
FW_HDCP_STATUS_INVALID_BUFFER_LENGTH = 0x1004,
FW_HDCP_STATUS_INVALID_PARAMS = 0x1005,
FW_HDCP_STATUS_AUTHENTICATION_FAILED = 0x1006,
/* WiDi Status Codes */
FW_HDCP_INVALID_SESSION_STATE = 0x6000,
FW_HDCP_SRM_FRAGMENT_UNEXPECTED = 0x6001,
FW_HDCP_SRM_INVALID_LENGTH = 0x6002,
FW_HDCP_SRM_FRAGMENT_OFFSET_INVALID = 0x6003,
FW_HDCP_SRM_VERIFICATION_FAILED = 0x6004,
FW_HDCP_SRM_VERSION_TOO_OLD = 0x6005,
FW_HDCP_RX_CERT_VERIFICATION_FAILED = 0x6006,
FW_HDCP_RX_REVOKED = 0x6007,
FW_HDCP_H_VERIFICATION_FAILED = 0x6008,
FW_HDCP_REPEATER_CHECK_UNEXPECTED = 0x6009,
FW_HDCP_TOPOLOGY_MAX_EXCEEDED = 0x600A,
FW_HDCP_V_VERIFICATION_FAILED = 0x600B,
FW_HDCP_L_VERIFICATION_FAILED = 0x600C,
FW_HDCP_STREAM_KEY_ALLOC_FAILED = 0x600D,
FW_HDCP_BASE_KEY_RESET_FAILED = 0x600E,
FW_HDCP_NONCE_GENERATION_FAILED = 0x600F,
FW_HDCP_STATUS_INVALID_E_KEY_STATE = 0x6010,
FW_HDCP_STATUS_INVALID_CS_ICV = 0x6011,
FW_HDCP_STATUS_INVALID_KB_KEY_STATE = 0x6012,
FW_HDCP_STATUS_INVALID_PAVP_MODE_ICV = 0x6013,
FW_HDCP_STATUS_INVALID_PAVP_MODE = 0x6014,
FW_HDCP_STATUS_LC_MAX_ATTEMPTS = 0x6015,
/* New status for HDCP 2.1 */
FW_HDCP_STATUS_MISMATCH_IN_M = 0x6016,
/* New status code for HDCP 2.2 Rx */
FW_HDCP_STATUS_RX_PROV_NOT_ALLOWED = 0x6017,
FW_HDCP_STATUS_RX_PROV_WRONG_SUBJECT = 0x6018,
FW_HDCP_RX_NEEDS_PROVISIONING = 0x6019,
FW_HDCP_BKSV_ICV_AUTH_FAILED = 0x6020,
FW_HDCP_STATUS_INVALID_STREAM_ID = 0x6021,
FW_HDCP_STATUS_CHAIN_NOT_INITIALIZED = 0x6022,
FW_HDCP_FAIL_NOT_EXPECTED = 0x6023,
FW_HDCP_FAIL_HDCP_OFF = 0x6024,
FW_HDCP_FAIL_INVALID_PAVP_MEMORY_MODE = 0x6025,
FW_HDCP_FAIL_AES_ECB_FAILURE = 0x6026,
FW_HDCP_FEATURE_NOT_SUPPORTED = 0x6027,
FW_HDCP_DMA_READ_ERROR = 0x6028,
FW_HDCP_DMA_WRITE_ERROR = 0x6029,
FW_HDCP_FAIL_INVALID_PACKET_SIZE = 0x6030,
FW_HDCP_H264_PARSING_ERROR = 0x6031,
FW_HDCP_HDCP2_ERRATA_VIDEO_VIOLATION = 0x6032,
FW_HDCP_HDCP2_ERRATA_AUDIO_VIOLATION = 0x6033,
FW_HDCP_TX_ACTIVE_ERROR = 0x6034,
FW_HDCP_MODE_CHANGE_ERROR = 0x6035,
FW_HDCP_STREAM_TYPE_ERROR = 0x6036,
FW_HDCP_STREAM_MANAGE_NOT_POSSIBLE = 0x6037,
FW_HDCP_STATUS_PORT_INVALID_COMMAND = 0x6038,
FW_HDCP_STATUS_UNSUPPORTED_PROTOCOL = 0x6039,
FW_HDCP_STATUS_INVALID_PORT_INDEX = 0x603a,
FW_HDCP_STATUS_TX_AUTH_NEEDED = 0x603b,
FW_HDCP_STATUS_NOT_INTEGRATED_PORT = 0x603c,
FW_HDCP_STATUS_SESSION_MAX_REACHED = 0x603d,
/* hdcp capable bit is not set in rx_caps(error is unique to DP) */
FW_HDCP_STATUS_NOT_HDCP_CAPABLE = 0x6041,
FW_HDCP_STATUS_INVALID_STREAM_COUNT = 0x6042,
};
#define HDCP_API_VERSION 0x00010000
#define HDCP_M_LEN 16
#define HDCP_KH_LEN 16
/* Payload Buffer size(Excluding Header) for CMDs and corresponding response */
/* Wired_Tx_AKE */
#define WIRED_CMD_BUF_LEN_INITIATE_HDCP2_SESSION_IN (4 + 1)
#define WIRED_CMD_BUF_LEN_INITIATE_HDCP2_SESSION_OUT (4 + 8 + 3)
#define WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_IN (4 + 522 + 8 + 3)
#define WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_MIN_OUT (4 + 1 + 3 + 16 + 16)
#define WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_MAX_OUT (4 + 1 + 3 + 128)
#define WIRED_CMD_BUF_LEN_AKE_SEND_HPRIME_IN (4 + 32)
#define WIRED_CMD_BUF_LEN_AKE_SEND_HPRIME_OUT (4)
#define WIRED_CMD_BUF_LEN_SEND_PAIRING_INFO_IN (4 + 16)
#define WIRED_CMD_BUF_LEN_SEND_PAIRING_INFO_OUT (4)
#define WIRED_CMD_BUF_LEN_CLOSE_SESSION_IN (4)
#define WIRED_CMD_BUF_LEN_CLOSE_SESSION_OUT (4)
/* Wired_Tx_LC */
#define WIRED_CMD_BUF_LEN_INIT_LOCALITY_CHECK_IN (4)
#define WIRED_CMD_BUF_LEN_INIT_LOCALITY_CHECK_OUT (4 + 8)
#define WIRED_CMD_BUF_LEN_VALIDATE_LOCALITY_IN (4 + 32)
#define WIRED_CMD_BUF_LEN_VALIDATE_LOCALITY_OUT (4)
/* Wired_Tx_SKE */
#define WIRED_CMD_BUF_LEN_GET_SESSION_KEY_IN (4)
#define WIRED_CMD_BUF_LEN_GET_SESSION_KEY_OUT (4 + 16 + 8)
/* Wired_Tx_SKE */
#define WIRED_CMD_BUF_LEN_ENABLE_AUTH_IN (4 + 1)
#define WIRED_CMD_BUF_LEN_ENABLE_AUTH_OUT (4)
/* Wired_Tx_Repeater */
#define WIRED_CMD_BUF_LEN_VERIFY_REPEATER_IN (4 + 2 + 3 + 16 + 155)
#define WIRED_CMD_BUF_LEN_VERIFY_REPEATER_OUT (4 + 1 + 16)
#define WIRED_CMD_BUF_LEN_REPEATER_AUTH_STREAM_REQ_MIN_IN (4 + 3 + \
32 + 2 + 2)
#define WIRED_CMD_BUF_LEN_REPEATER_AUTH_STREAM_REQ_OUT (4)
/* hdcp_command_id: Enumeration of all WIRED HDCP Command IDs */
enum hdcp_command_id {
_WIDI_COMMAND_BASE = 0x00030000,
WIDI_INITIATE_HDCP2_SESSION = _WIDI_COMMAND_BASE,
HDCP_GET_SRM_STATUS,
HDCP_SEND_SRM_FRAGMENT,
/* The wired HDCP Tx commands */
_WIRED_COMMAND_BASE = 0x00031000,
WIRED_INITIATE_HDCP2_SESSION = _WIRED_COMMAND_BASE,
WIRED_VERIFY_RECEIVER_CERT,
WIRED_AKE_SEND_HPRIME,
WIRED_AKE_SEND_PAIRING_INFO,
WIRED_INIT_LOCALITY_CHECK,
WIRED_VALIDATE_LOCALITY,
WIRED_GET_SESSION_KEY,
WIRED_ENABLE_AUTH,
WIRED_VERIFY_REPEATER,
WIRED_REPEATER_AUTH_STREAM_REQ,
WIRED_CLOSE_SESSION,
_WIRED_COMMANDS_COUNT,
};
union encrypted_buff {
u8 e_kpub_km[HDCP_2_2_E_KPUB_KM_LEN];
u8 e_kh_km_m[HDCP_2_2_E_KH_KM_M_LEN];
struct {
u8 e_kh_km[HDCP_KH_LEN];
u8 m[HDCP_M_LEN];
} __packed;
};
/* HDCP HECI message header. All header values are little endian. */
struct hdcp_cmd_header {
u32 api_version;
u32 command_id;
enum fw_hdcp_status status;
/* Length of the HECI message (excluding the header) */
u32 buffer_len;
} __packed;
/* Empty command request or response. No data follows the header. */
struct hdcp_cmd_no_data {
struct hdcp_cmd_header header;
} __packed;
/* Uniquely identifies the hdcp port being addressed for a given command. */
struct hdcp_port_id {
u8 integrated_port_type;
/* physical_port is used until Gen11.5. Must be zero for Gen11.5+ */
u8 physical_port;
/* attached_transcoder is for Gen11.5+. Set to zero for <Gen11.5 */
u8 attached_transcoder;
u8 reserved;
} __packed;
/*
* Data structures for integrated wired HDCP2 Tx in
* support of the AKE protocol
*/
/* HECI struct for integrated wired HDCP Tx session initiation. */
struct wired_cmd_initiate_hdcp2_session_in {
struct hdcp_cmd_header header;
struct hdcp_port_id port;
u8 protocol; /* for HDMI vs DP */
} __packed;
struct wired_cmd_initiate_hdcp2_session_out {
struct hdcp_cmd_header header;
struct hdcp_port_id port;
u8 r_tx[HDCP_2_2_RTX_LEN];
struct hdcp2_tx_caps tx_caps;
} __packed;
/* HECI struct for ending an integrated wired HDCP Tx session. */
struct wired_cmd_close_session_in {
struct hdcp_cmd_header header;
struct hdcp_port_id port;
} __packed;
struct wired_cmd_close_session_out {
struct hdcp_cmd_header header;
struct hdcp_port_id port;
} __packed;
/* HECI struct for integrated wired HDCP Tx Rx Cert verification. */
struct wired_cmd_verify_receiver_cert_in {
struct hdcp_cmd_header header;
struct hdcp_port_id port;
struct hdcp2_cert_rx cert_rx;
u8 r_rx[HDCP_2_2_RRX_LEN];
u8 rx_caps[HDCP_2_2_RXCAPS_LEN];
} __packed;
struct wired_cmd_verify_receiver_cert_out {
struct hdcp_cmd_header header;
struct hdcp_port_id port;
u8 km_stored;
u8 reserved[3];
union encrypted_buff ekm_buff;
} __packed;
/* HECI struct for verification of Rx's Hprime in a HDCP Tx session */
struct wired_cmd_ake_send_hprime_in {
struct hdcp_cmd_header header;
struct hdcp_port_id port;
u8 h_prime[HDCP_2_2_H_PRIME_LEN];
} __packed;
struct wired_cmd_ake_send_hprime_out {
struct hdcp_cmd_header header;
struct hdcp_port_id port;
} __packed;
/*
* HECI struct for sending in AKE pairing data generated by the Rx in an
* integrated wired HDCP Tx session.
*/
struct wired_cmd_ake_send_pairing_info_in {
struct hdcp_cmd_header header;
struct hdcp_port_id port;
u8 e_kh_km[HDCP_2_2_E_KH_KM_LEN];
} __packed;
struct wired_cmd_ake_send_pairing_info_out {
struct hdcp_cmd_header header;
struct hdcp_port_id port;
} __packed;
/* Data structures for integrated wired HDCP2 Tx in support of the LC protocol*/
/*
* HECI struct for initiating locality check with an
* integrated wired HDCP Tx session.
*/
struct wired_cmd_init_locality_check_in {
struct hdcp_cmd_header header;
struct hdcp_port_id port;
} __packed;
struct wired_cmd_init_locality_check_out {
struct hdcp_cmd_header header;
struct hdcp_port_id port;
u8 r_n[HDCP_2_2_RN_LEN];
} __packed;
/*
* HECI struct for validating an Rx's LPrime value in an
* integrated wired HDCP Tx session.
*/
struct wired_cmd_validate_locality_in {
struct hdcp_cmd_header header;
struct hdcp_port_id port;
u8 l_prime[HDCP_2_2_L_PRIME_LEN];
} __packed;
struct wired_cmd_validate_locality_out {
struct hdcp_cmd_header header;
struct hdcp_port_id port;
} __packed;
/*
* Data structures for integrated wired HDCP2 Tx in support of the
* SKE protocol
*/
/* HECI struct for creating session key */
struct wired_cmd_get_session_key_in {
struct hdcp_cmd_header header;
struct hdcp_port_id port;
} __packed;
struct wired_cmd_get_session_key_out {
struct hdcp_cmd_header header;
struct hdcp_port_id port;
u8 e_dkey_ks[HDCP_2_2_E_DKEY_KS_LEN];
u8 r_iv[HDCP_2_2_RIV_LEN];
} __packed;
/* HECI struct for the Tx enable authentication command */
struct wired_cmd_enable_auth_in {
struct hdcp_cmd_header header;
struct hdcp_port_id port;
u8 stream_type;
} __packed;
struct wired_cmd_enable_auth_out {
struct hdcp_cmd_header header;
struct hdcp_port_id port;
} __packed;
/*
* Data structures for integrated wired HDCP2 Tx in support of
* the repeater protocols
*/
/*
* HECI struct for verifying the downstream repeater's HDCP topology in an
* integrated wired HDCP Tx session.
*/
struct wired_cmd_verify_repeater_in {
struct hdcp_cmd_header header;
struct hdcp_port_id port;
u8 rx_info[HDCP_2_2_RXINFO_LEN];
u8 seq_num_v[HDCP_2_2_SEQ_NUM_LEN];
u8 v_prime[HDCP_2_2_V_PRIME_HALF_LEN];
u8 receiver_ids[HDCP_2_2_RECEIVER_IDS_MAX_LEN];
} __packed;
struct wired_cmd_verify_repeater_out {
struct hdcp_cmd_header header;
struct hdcp_port_id port;
u8 content_type_supported;
u8 v[HDCP_2_2_V_PRIME_HALF_LEN];
} __packed;
/*
* HECI struct in support of stream management in an
* integrated wired HDCP Tx session.
*/
struct wired_cmd_repeater_auth_stream_req_in {
struct hdcp_cmd_header header;
struct hdcp_port_id port;
u8 seq_num_m[HDCP_2_2_SEQ_NUM_LEN];
u8 m_prime[HDCP_2_2_MPRIME_LEN];
__be16 k;
struct hdcp2_streamid_type streams[];
} __packed;
struct wired_cmd_repeater_auth_stream_req_out {
struct hdcp_cmd_header header;
struct hdcp_port_id port;
} __packed;
#endif /* _I915_HDCP_INTERFACE_H_ */

View File

@ -1,184 +0,0 @@
/* SPDX-License-Identifier: (GPL-2.0+) */
/*
* Copyright © 2017-2019 Intel Corporation
*
* Authors:
* Ramalingam C <ramalingam.c@intel.com>
*/
#ifndef _I915_MEI_HDCP_INTERFACE_H_
#define _I915_MEI_HDCP_INTERFACE_H_
#include <linux/mutex.h>
#include <linux/device.h>
#include <drm/display/drm_hdcp.h>
/**
* enum hdcp_port_type - HDCP port implementation type defined by ME FW
* @HDCP_PORT_TYPE_INVALID: Invalid hdcp port type
* @HDCP_PORT_TYPE_INTEGRATED: In-Host HDCP2.x port
* @HDCP_PORT_TYPE_LSPCON: HDCP2.2 discrete wired Tx port with LSPCON
* (HDMI 2.0) solution
* @HDCP_PORT_TYPE_CPDP: HDCP2.2 discrete wired Tx port using the CPDP (DP 1.3)
* solution
*/
enum hdcp_port_type {
HDCP_PORT_TYPE_INVALID,
HDCP_PORT_TYPE_INTEGRATED,
HDCP_PORT_TYPE_LSPCON,
HDCP_PORT_TYPE_CPDP
};
/**
* enum hdcp_wired_protocol - HDCP adaptation used on the port
* @HDCP_PROTOCOL_INVALID: Invalid HDCP adaptation protocol
* @HDCP_PROTOCOL_HDMI: HDMI adaptation of HDCP used on the port
* @HDCP_PROTOCOL_DP: DP adaptation of HDCP used on the port
*/
enum hdcp_wired_protocol {
HDCP_PROTOCOL_INVALID,
HDCP_PROTOCOL_HDMI,
HDCP_PROTOCOL_DP
};
enum mei_fw_ddi {
MEI_DDI_INVALID_PORT = 0x0,
MEI_DDI_B = 1,
MEI_DDI_C,
MEI_DDI_D,
MEI_DDI_E,
MEI_DDI_F,
MEI_DDI_A = 7,
MEI_DDI_RANGE_END = MEI_DDI_A,
};
/**
* enum mei_fw_tc - ME Firmware defined index for transcoders
* @MEI_INVALID_TRANSCODER: Index for Invalid transcoder
* @MEI_TRANSCODER_EDP: Index for EDP Transcoder
* @MEI_TRANSCODER_DSI0: Index for DSI0 Transcoder
* @MEI_TRANSCODER_DSI1: Index for DSI1 Transcoder
* @MEI_TRANSCODER_A: Index for Transcoder A
* @MEI_TRANSCODER_B: Index for Transcoder B
* @MEI_TRANSCODER_C: Index for Transcoder C
* @MEI_TRANSCODER_D: Index for Transcoder D
*/
enum mei_fw_tc {
MEI_INVALID_TRANSCODER = 0x00,
MEI_TRANSCODER_EDP,
MEI_TRANSCODER_DSI0,
MEI_TRANSCODER_DSI1,
MEI_TRANSCODER_A = 0x10,
MEI_TRANSCODER_B,
MEI_TRANSCODER_C,
MEI_TRANSCODER_D
};
/**
* struct hdcp_port_data - intel specific HDCP port data
* @fw_ddi: ddi index as per ME FW
* @fw_tc: transcoder index as per ME FW
* @port_type: HDCP port type as per ME FW classification
* @protocol: HDCP adaptation as per ME FW
* @k: No of streams transmitted on a port. Only on DP MST this is != 1
* @seq_num_m: Count of RepeaterAuth_Stream_Manage msg propagated.
* Initialized to 0 on AKE_INIT. Incremented after every successful
* transmission of RepeaterAuth_Stream_Manage message. When it rolls
* over re-Auth has to be triggered.
* @streams: struct hdcp2_streamid_type[k]. Defines the type and id for the
* streams
*/
struct hdcp_port_data {
enum mei_fw_ddi fw_ddi;
enum mei_fw_tc fw_tc;
u8 port_type;
u8 protocol;
u16 k;
u32 seq_num_m;
struct hdcp2_streamid_type *streams;
};
/**
* struct i915_hdcp_component_ops- ops for HDCP2.2 services.
* @owner: Module providing the ops
* @initiate_hdcp2_session: Initiate a Wired HDCP2.2 Tx Session.
* And Prepare AKE_Init.
* @verify_receiver_cert_prepare_km: Verify the Receiver Certificate
* AKE_Send_Cert and prepare
AKE_Stored_Km/AKE_No_Stored_Km
* @verify_hprime: Verify AKE_Send_H_prime
* @store_pairing_info: Store pairing info received
* @initiate_locality_check: Prepare LC_Init
* @verify_lprime: Verify lprime
* @get_session_key: Prepare SKE_Send_Eks
* @repeater_check_flow_prepare_ack: Validate the Downstream topology
* and prepare rep_ack
* @verify_mprime: Verify mprime
* @enable_hdcp_authentication: Mark a port as authenticated.
* @close_hdcp_session: Close the Wired HDCP Tx session per port.
* This also disables the authenticated state of the port.
*/
struct i915_hdcp_component_ops {
/**
* @owner: mei_hdcp module
*/
struct module *owner;
int (*initiate_hdcp2_session)(struct device *dev,
struct hdcp_port_data *data,
struct hdcp2_ake_init *ake_data);
int (*verify_receiver_cert_prepare_km)(struct device *dev,
struct hdcp_port_data *data,
struct hdcp2_ake_send_cert
*rx_cert,
bool *km_stored,
struct hdcp2_ake_no_stored_km
*ek_pub_km,
size_t *msg_sz);
int (*verify_hprime)(struct device *dev,
struct hdcp_port_data *data,
struct hdcp2_ake_send_hprime *rx_hprime);
int (*store_pairing_info)(struct device *dev,
struct hdcp_port_data *data,
struct hdcp2_ake_send_pairing_info
*pairing_info);
int (*initiate_locality_check)(struct device *dev,
struct hdcp_port_data *data,
struct hdcp2_lc_init *lc_init_data);
int (*verify_lprime)(struct device *dev,
struct hdcp_port_data *data,
struct hdcp2_lc_send_lprime *rx_lprime);
int (*get_session_key)(struct device *dev,
struct hdcp_port_data *data,
struct hdcp2_ske_send_eks *ske_data);
int (*repeater_check_flow_prepare_ack)(struct device *dev,
struct hdcp_port_data *data,
struct hdcp2_rep_send_receiverid_list
*rep_topology,
struct hdcp2_rep_send_ack
*rep_send_ack);
int (*verify_mprime)(struct device *dev,
struct hdcp_port_data *data,
struct hdcp2_rep_stream_ready *stream_ready);
int (*enable_hdcp_authentication)(struct device *dev,
struct hdcp_port_data *data);
int (*close_hdcp_session)(struct device *dev,
struct hdcp_port_data *data);
};
/**
* struct i915_hdcp_component_master - Used for communication between i915
* and mei_hdcp drivers for the HDCP2.2 services
* @mei_dev: device that provide the HDCP2.2 service from MEI Bus.
* @hdcp_ops: Ops implemented by mei_hdcp driver, used by i915 driver.
*/
struct i915_hdcp_comp_master {
struct device *mei_dev;
const struct i915_hdcp_component_ops *ops;
/* To protect the above members. */
struct mutex mutex;
};
#endif /* _I915_MEI_HDCP_INTERFACE_H_ */