target/linux/bcm27xx/patches-5.15/950-0018-drm-vc4-Increase-the-core-clock-based-on-HVS-load.patch
From 99c821dc4cd65ff067e2dfff4a47ceb5aa61ad0c Mon Sep 17 00:00:00 2001
From: Maxime Ripard <maxime@cerno.tech>
Date: Wed, 26 May 2021 16:13:02 +0200
Subject: [PATCH] drm/vc4: Increase the core clock based on HVS load

Depending on the load of a given HVS output (HVS to PixelValves) and
input (planes attached to a channel), the HVS needs the core clock to
be raised above its boot-time default.

Failing to do so will result in a vblank timeout and a stalled display
pipeline.

Signed-off-by: Maxime Ripard <maxime@cerno.tech>
---
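A minimal standalone sketch of the arithmetic this patch adds, assuming a
single 1080p60 output on HDMI0; the plane load value is illustrative only
(the real one comes from the vc4 load tracker), and the sketch is not part
of the applied diff:

  #include <stdio.h>

  static unsigned long max_ul(unsigned long a, unsigned long b)
  {
  	return a > b ? a : b;
  }

  int main(void)
  {
  	/* 1080p60: pixel clock in kHz, active/total horizontal pixels */
  	unsigned long clock = 148500, hdisplay = 1920, htotal = 2200;

  	/* Per-FIFO load for HDMI0, as in vc4_crtc_atomic_check() */
  	unsigned long hvs_load = max_ul(clock * hdisplay / htotal + 1000,
  					clock * 9 / 10) * 1000;

  	/* Assumed plane load in Hz (illustrative): 1920 * 1080 * 60 */
  	unsigned long plane_load = 124416000;

  	/*
  	 * Single output: 60% of the plane load, one FIFO in use,
  	 * as in vc4_core_clock_atomic_check()
  	 */
  	unsigned long pixel_rate = plane_load * 60 / 100;
  	unsigned long cob_rate = hvs_load;

  	/* Prints "core clock >= 133650000 Hz" for this mode */
  	printf("core clock >= %lu Hz\n", max_ul(cob_rate, pixel_rate));
  	return 0;
  }
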
 drivers/gpu/drm/vc4/vc4_crtc.c |  15 +++++
 drivers/gpu/drm/vc4/vc4_drv.h  |   2 +
 drivers/gpu/drm/vc4/vc4_kms.c  | 112 ++++++++++++++++++++++++++++++---
 3 files changed, 119 insertions(+), 10 deletions(-)

--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -661,12 +661,27 @@ static int vc4_crtc_atomic_check(struct
 	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
 	struct drm_connector *conn;
 	struct drm_connector_state *conn_state;
+	struct drm_encoder *encoder;
 	int ret, i;

 	ret = vc4_hvs_atomic_check(crtc, state);
 	if (ret)
 		return ret;

+	encoder = vc4_get_crtc_encoder(crtc, crtc_state);
+	if (encoder) {
+		const struct drm_display_mode *mode = &crtc_state->adjusted_mode;
+		struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
+
+		mode = &crtc_state->adjusted_mode;
+		if (vc4_encoder->type == VC4_ENCODER_TYPE_HDMI0) {
+			vc4_state->hvs_load = max(mode->clock * mode->hdisplay / mode->htotal + 1000,
+						  mode->clock * 9 / 10) * 1000;
+		} else {
+			vc4_state->hvs_load = mode->clock * 1000;
+		}
+	}
+
 	for_each_new_connector_in_state(state, conn, conn_state,
 					i) {
 		if (conn_state->crtc != crtc)
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -558,6 +558,8 @@ struct vc4_crtc_state {
 		unsigned int bottom;
 	} margins;

+	unsigned long hvs_load;
+
 	/* Transitional state below, only valid during atomic commits */
 	bool update_muxing;
 };
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -39,9 +39,11 @@ static struct vc4_ctm_state *to_vc4_ctm_

 struct vc4_hvs_state {
 	struct drm_private_state base;
+	unsigned long core_clock_rate;

 	struct {
 		unsigned in_use: 1;
+		unsigned long fifo_load;
 		struct drm_crtc_commit *pending_commit;
 	} fifo_state[HVS_NUM_CHANNELS];
 };
@@ -339,11 +341,20 @@ static void vc4_atomic_commit_tail(struc
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
 	struct vc4_hvs *hvs = vc4->hvs;
 	struct drm_crtc_state *new_crtc_state;
+	struct vc4_hvs_state *new_hvs_state;
 	struct drm_crtc *crtc;
 	struct vc4_hvs_state *old_hvs_state;
 	unsigned int channel;
 	int i;

+	old_hvs_state = vc4_hvs_get_old_global_state(state);
+	if (WARN_ON(!old_hvs_state))
+		return;
+
+	new_hvs_state = vc4_hvs_get_new_global_state(state);
+	if (WARN_ON(!new_hvs_state))
+		return;
+
 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
 		struct vc4_crtc_state *vc4_crtc_state;

@@ -354,10 +365,6 @@ static void vc4_atomic_commit_tail(struc
 		vc4_hvs_mask_underrun(dev, vc4_crtc_state->assigned_channel);
 	}

-	old_hvs_state = vc4_hvs_get_old_global_state(state);
-	if (IS_ERR(old_hvs_state))
-		return;
-
 	for (channel = 0; channel < HVS_NUM_CHANNELS; channel++) {
 		struct drm_crtc_commit *commit;
 		int ret;
@@ -377,8 +384,13 @@ static void vc4_atomic_commit_tail(struc
 		old_hvs_state->fifo_state[channel].pending_commit = NULL;
 	}

-	if (vc4->hvs->hvs5)
-		clk_set_min_rate(hvs->core_clk, 500000000);
+	if (vc4->hvs->hvs5) {
+		unsigned long core_rate = max_t(unsigned long,
+						500000000,
+						new_hvs_state->core_clock_rate);
+
+		clk_set_min_rate(hvs->core_clk, core_rate);
+	}

 	drm_atomic_helper_commit_modeset_disables(dev, state);

@@ -401,8 +413,12 @@ static void vc4_atomic_commit_tail(struc

 	drm_atomic_helper_cleanup_planes(dev, state);

-	if (vc4->hvs->hvs5)
-		clk_set_min_rate(hvs->core_clk, 0);
+	if (vc4->hvs->hvs5) {
+		drm_dbg(dev, "Running the core clock at %lu Hz\n",
+			new_hvs_state->core_clock_rate);
+
+		clk_set_min_rate(hvs->core_clk, new_hvs_state->core_clock_rate);
+	}
 }

 static int vc4_atomic_commit_setup(struct drm_atomic_state *state)
@@ -659,11 +675,13 @@ vc4_hvs_channels_duplicate_state(struct

 	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

-
 	for (i = 0; i < HVS_NUM_CHANNELS; i++) {
 		state->fifo_state[i].in_use = old_state->fifo_state[i].in_use;
+		state->fifo_state[i].fifo_load = old_state->fifo_state[i].fifo_load;
 	}

+	state->core_clock_rate = old_state->core_clock_rate;
+
 	return &state->base;
 }

@@ -819,6 +837,76 @@ static int vc4_pv_muxing_atomic_check(st
 }

 static int
+vc4_core_clock_atomic_check(struct drm_atomic_state *state)
+{
+	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
+	struct drm_private_state *priv_state;
+	struct vc4_hvs_state *hvs_new_state;
+	struct vc4_load_tracker_state *load_state;
+	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+	struct drm_crtc *crtc;
+	unsigned int num_outputs;
+	unsigned long pixel_rate;
+	unsigned long cob_rate;
+	unsigned int i;
+
+	priv_state = drm_atomic_get_private_obj_state(state,
+						       &vc4->load_tracker);
+	if (IS_ERR(priv_state))
+		return PTR_ERR(priv_state);
+
+	load_state = to_vc4_load_tracker_state(priv_state);
+
+	hvs_new_state = vc4_hvs_get_global_state(state);
+	if (!hvs_new_state)
+		return -EINVAL;
+
+	for_each_oldnew_crtc_in_state(state, crtc,
+				      old_crtc_state,
+				      new_crtc_state,
+				      i) {
+		if (old_crtc_state->active) {
+			struct vc4_crtc_state *old_vc4_state =
+				to_vc4_crtc_state(old_crtc_state);
+			unsigned int channel = old_vc4_state->assigned_channel;
+
+			hvs_new_state->fifo_state[channel].fifo_load = 0;
+		}
+
+		if (new_crtc_state->active) {
+			struct vc4_crtc_state *new_vc4_state =
+				to_vc4_crtc_state(new_crtc_state);
+			unsigned int channel = new_vc4_state->assigned_channel;
+
+			hvs_new_state->fifo_state[channel].fifo_load =
+				new_vc4_state->hvs_load;
+		}
+	}
+
+	cob_rate = 0;
+	num_outputs = 0;
+	for (i = 0; i < HVS_NUM_CHANNELS; i++) {
+		if (!hvs_new_state->fifo_state[i].in_use)
+			continue;
+
+		num_outputs++;
+		cob_rate += hvs_new_state->fifo_state[i].fifo_load;
+	}
+
+	pixel_rate = load_state->hvs_load;
+	if (num_outputs > 1) {
+		pixel_rate = (pixel_rate * 40) / 100;
+	} else {
+		pixel_rate = (pixel_rate * 60) / 100;
+	}
+
+	hvs_new_state->core_clock_rate = max(cob_rate, pixel_rate);
+
+	return 0;
+}
+
+
+static int
 vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
 {
 	int ret;
@@ -835,7 +923,11 @@ vc4_atomic_check(struct drm_device *dev,
 	if (ret)
 		return ret;

-	return vc4_load_tracker_atomic_check(state);
+	ret = vc4_load_tracker_atomic_check(state);
+	if (ret)
+		return ret;
+
+	return vc4_core_clock_atomic_check(state);
 }

 static struct drm_mode_config_helper_funcs vc4_mode_config_helpers = {