mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-04-29 07:02:29 -04:00
drm/amd/display: Enable Seamless Boot Transition for Multiple Streams
[why] dc previously had bugs that interfered with the ability to inherit a timing from a device with multiple streams (without flash/blanking). After this fix there is still a dependency on UEFI support. [how] fixed 3 bugs: loaded MPC state, changed bw_optimize flag to a counter instead of a boolean, and reading dpp/disp clk from HW to ensure we don't raise the clocks when we're not supposed to. Signed-off-by: Martin Leung <martin.leung@amd.com> Reviewed-by: Anthony Koo <Anthony.Koo@amd.com> Acked-by: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
committed by
Alex Deucher
parent
4a8ca46bae
commit
ccce745c28
@@ -27,6 +27,7 @@
|
||||
#include "clk_mgr_internal.h"
|
||||
|
||||
#include "dce100/dce_clk_mgr.h"
|
||||
#include "dcn20_clk_mgr.h"
|
||||
#include "reg_helper.h"
|
||||
#include "core_types.h"
|
||||
#include "dm_helpers.h"
|
||||
@@ -161,6 +162,9 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
|
||||
dc->debug.force_clock_mode & 0x1) {
|
||||
//this is from resume or boot up, if forced_clock cfg option used, we bypass program dispclk and DPPCLK, but need set them for S3.
|
||||
force_reset = true;
|
||||
|
||||
dcn2_read_clocks_from_hw_dentist(clk_mgr_base);
|
||||
|
||||
//force_clock_mode 0x1: force reset the clock even it is the same clock as long as it is in Passive level.
|
||||
}
|
||||
display_count = clk_mgr_helper_get_active_display_cnt(dc, context);
|
||||
@@ -339,6 +343,32 @@ void dcn2_enable_pme_wa(struct clk_mgr *clk_mgr_base)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void dcn2_read_clocks_from_hw_dentist(struct clk_mgr *clk_mgr_base)
|
||||
{
|
||||
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
|
||||
uint32_t dispclk_wdivider;
|
||||
uint32_t dppclk_wdivider;
|
||||
int disp_divider;
|
||||
int dpp_divider;
|
||||
|
||||
REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, &dispclk_wdivider);
|
||||
REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_WDIVIDER, &dppclk_wdivider);
|
||||
|
||||
disp_divider = dentist_get_divider_from_did(dispclk_wdivider);
|
||||
dpp_divider = dentist_get_divider_from_did(dispclk_wdivider);
|
||||
|
||||
if (disp_divider && dpp_divider) {
|
||||
/* Calculate the current DFS clock, in kHz.*/
|
||||
clk_mgr_base->clks.dispclk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
|
||||
* clk_mgr->base.dentist_vco_freq_khz) / disp_divider;
|
||||
|
||||
clk_mgr_base->clks.dppclk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
|
||||
* clk_mgr->base.dentist_vco_freq_khz) / dpp_divider;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
void dcn2_get_clock(struct clk_mgr *clk_mgr,
|
||||
struct dc_state *context,
|
||||
enum dc_clock_type clock_type,
|
||||
|
||||
@@ -51,4 +51,8 @@ void dcn2_get_clock(struct clk_mgr *clk_mgr,
|
||||
struct dc_clock_config *clock_cfg);
|
||||
|
||||
void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr);
|
||||
|
||||
void dcn2_read_clocks_from_hw_dentist(struct clk_mgr *clk_mgr_base);
|
||||
|
||||
|
||||
#endif //__DCN20_CLK_MGR_H__
|
||||
|
||||
@@ -835,6 +835,7 @@ struct dc *dc_create(const struct dc_init_data *init_params)
|
||||
full_pipe_count,
|
||||
dc->res_pool->stream_enc_count);
|
||||
|
||||
dc->optimize_seamless_boot_streams = 0;
|
||||
dc->caps.max_links = dc->link_count;
|
||||
dc->caps.max_audios = dc->res_pool->audio_count;
|
||||
dc->caps.linear_pitch_alignment = 64;
|
||||
@@ -1178,10 +1179,10 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
|
||||
|
||||
for (i = 0; i < context->stream_count; i++) {
|
||||
if (context->streams[i]->apply_seamless_boot_optimization)
|
||||
dc->optimize_seamless_boot = true;
|
||||
dc->optimize_seamless_boot_streams++;
|
||||
}
|
||||
|
||||
if (!dc->optimize_seamless_boot)
|
||||
if (dc->optimize_seamless_boot_streams == 0)
|
||||
dc->hwss.prepare_bandwidth(dc, context);
|
||||
|
||||
/* re-program planes for existing stream, in case we need to
|
||||
@@ -1254,7 +1255,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
|
||||
|
||||
dc_enable_stereo(dc, context, dc_streams, context->stream_count);
|
||||
|
||||
if (!dc->optimize_seamless_boot) {
|
||||
if (dc->optimize_seamless_boot_streams == 0) {
|
||||
/* Must wait for no flips to be pending before doing optimize bw */
|
||||
wait_for_no_pipes_pending(dc, context);
|
||||
/* pplib is notified if disp_num changed */
|
||||
@@ -1300,7 +1301,7 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
|
||||
int i;
|
||||
struct dc_state *context = dc->current_state;
|
||||
|
||||
if (!dc->optimized_required || dc->optimize_seamless_boot)
|
||||
if (!dc->optimized_required || dc->optimize_seamless_boot_streams > 0)
|
||||
return true;
|
||||
|
||||
post_surface_trace(dc);
|
||||
@@ -2084,7 +2085,7 @@ static void commit_planes_do_stream_update(struct dc *dc,
|
||||
|
||||
dc->hwss.optimize_bandwidth(dc, dc->current_state);
|
||||
} else {
|
||||
if (!dc->optimize_seamless_boot)
|
||||
if (dc->optimize_seamless_boot_streams == 0)
|
||||
dc->hwss.prepare_bandwidth(dc, dc->current_state);
|
||||
|
||||
core_link_enable_stream(dc->current_state, pipe_ctx);
|
||||
@@ -2125,7 +2126,7 @@ static void commit_planes_for_stream(struct dc *dc,
|
||||
int i, j;
|
||||
struct pipe_ctx *top_pipe_to_program = NULL;
|
||||
|
||||
if (dc->optimize_seamless_boot && surface_count > 0) {
|
||||
if (dc->optimize_seamless_boot_streams > 0 && surface_count > 0) {
|
||||
/* Optimize seamless boot flag keeps clocks and watermarks high until
|
||||
* first flip. After first flip, optimization is required to lower
|
||||
* bandwidth. Important to note that it is expected UEFI will
|
||||
@@ -2134,12 +2135,14 @@ static void commit_planes_for_stream(struct dc *dc,
|
||||
*/
|
||||
if (stream->apply_seamless_boot_optimization) {
|
||||
stream->apply_seamless_boot_optimization = false;
|
||||
dc->optimize_seamless_boot = false;
|
||||
dc->optimized_required = true;
|
||||
dc->optimize_seamless_boot_streams--;
|
||||
|
||||
if (dc->optimize_seamless_boot_streams == 0)
|
||||
dc->optimized_required = true;
|
||||
}
|
||||
}
|
||||
|
||||
if (update_type == UPDATE_TYPE_FULL && !dc->optimize_seamless_boot) {
|
||||
if (update_type == UPDATE_TYPE_FULL && dc->optimize_seamless_boot_streams == 0) {
|
||||
dc->hwss.prepare_bandwidth(dc, context);
|
||||
context_clock_trace(dc, context);
|
||||
}
|
||||
|
||||
@@ -1910,8 +1910,26 @@ static int acquire_resource_from_hw_enabled_state(
|
||||
pipe_ctx->plane_res.dpp = pool->dpps[tg_inst];
|
||||
pipe_ctx->stream_res.opp = pool->opps[tg_inst];
|
||||
|
||||
if (pool->dpps[tg_inst])
|
||||
if (pool->dpps[tg_inst]) {
|
||||
pipe_ctx->plane_res.mpcc_inst = pool->dpps[tg_inst]->inst;
|
||||
|
||||
// Read DPP->MPCC->OPP Pipe from HW State
|
||||
if (pool->mpc->funcs->read_mpcc_state) {
|
||||
struct mpcc_state s = {0};
|
||||
|
||||
pool->mpc->funcs->read_mpcc_state(pool->mpc, pipe_ctx->plane_res.mpcc_inst, &s);
|
||||
|
||||
if (s.dpp_id < MAX_MPCC)
|
||||
pool->mpc->mpcc_array[pipe_ctx->plane_res.mpcc_inst].dpp_id = s.dpp_id;
|
||||
|
||||
if (s.bot_mpcc_id < MAX_MPCC)
|
||||
pool->mpc->mpcc_array[pipe_ctx->plane_res.mpcc_inst].mpcc_bot =
|
||||
&pool->mpc->mpcc_array[s.bot_mpcc_id];
|
||||
|
||||
if (s.opp_id < MAX_OPP)
|
||||
pipe_ctx->stream_res.opp->mpc_tree_params.opp_id = s.opp_id;
|
||||
}
|
||||
}
|
||||
pipe_ctx->pipe_idx = tg_inst;
|
||||
|
||||
pipe_ctx->stream = stream;
|
||||
|
||||
@@ -513,7 +513,7 @@ struct dc {
|
||||
bool optimized_required;
|
||||
|
||||
/* Require to maintain clocks and bandwidth for UEFI enabled HW */
|
||||
bool optimize_seamless_boot;
|
||||
int optimize_seamless_boot_streams;
|
||||
|
||||
/* FBC compressor */
|
||||
struct compressor *fbc_compressor;
|
||||
|
||||
Reference in New Issue
Block a user