GitHub Repository: torvalds/linux
Path: blob/master/drivers/gpu/drm/drm_atomic_helper.c
/*
 * Copyright (C) 2014 Red Hat
 * Copyright (C) 2014 Intel Corp.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 * Rob Clark <[email protected]>
 * Daniel Vetter <[email protected]>
 */

#include <linux/export.h>
#include <linux/dma-fence.h>
#include <linux/ktime.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_blend.h>
#include <drm/drm_bridge.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_panic.h>
#include <drm/drm_print.h>
#include <drm/drm_self_refresh_helper.h>
#include <drm/drm_vblank.h>
#include <drm/drm_writeback.h>

#include "drm_crtc_helper_internal.h"
#include "drm_crtc_internal.h"

/**
 * DOC: overview
 *
 * This helper library provides implementations of check and commit functions on
 * top of the CRTC modeset helper callbacks and the plane helper callbacks. It
 * also provides convenience implementations for the atomic state handling
 * callbacks for drivers which don't need to subclass the drm core structures to
 * add their own additional internal state.
 *
 * This library also provides default implementations for the check callback in
 * drm_atomic_helper_check() and for the commit callback with
 * drm_atomic_helper_commit(). But the individual stages and callbacks are
 * exposed to allow drivers to mix and match and e.g. use the plane helpers only
 * together with a driver private modeset implementation.
 *
 * This library also provides implementations for all the legacy driver
 * interfaces on top of the atomic interface. See drm_atomic_helper_set_config(),
 * drm_atomic_helper_disable_plane(), and the various functions to implement
 * set_property callbacks. New drivers must not implement these functions
 * themselves but must use the provided helpers.
 *
 * The atomic helper uses the same function table structures as all other
 * modesetting helpers. See the documentation for &struct drm_crtc_helper_funcs,
 * &struct drm_encoder_helper_funcs and &struct drm_connector_helper_funcs. It
 * also shares the &struct drm_plane_helper_funcs function table with the plane
 * helpers.
 */
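/*
 * Example (illustrative sketch, not taken from a real driver): a driver that
 * needs no extra global checks can wire the default helpers straight into its
 * &struct drm_mode_config_funcs. The name "foo_mode_config_funcs" below is
 * hypothetical.
 *
 *	static const struct drm_mode_config_funcs foo_mode_config_funcs = {
 *		.fb_create = drm_gem_fb_create,
 *		.atomic_check = drm_atomic_helper_check,
 *		.atomic_commit = drm_atomic_helper_commit,
 *	};
 */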
static void
drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
				struct drm_plane_state *old_plane_state,
				struct drm_plane_state *plane_state,
				struct drm_plane *plane)
{
	struct drm_crtc_state *crtc_state;

	if (old_plane_state->crtc) {
		crtc_state = drm_atomic_get_new_crtc_state(state,
							   old_plane_state->crtc);

		if (WARN_ON(!crtc_state))
			return;

		crtc_state->planes_changed = true;
	}

	if (plane_state->crtc) {
		crtc_state = drm_atomic_get_new_crtc_state(state, plane_state->crtc);

		if (WARN_ON(!crtc_state))
			return;

		crtc_state->planes_changed = true;
	}
}

static int handle_conflicting_encoders(struct drm_atomic_state *state,
				       bool disable_conflicting_encoders)
{
	struct drm_connector_state *new_conn_state;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct drm_encoder *encoder;
	unsigned int encoder_mask = 0;
	int i, ret = 0;

	/*
	 * First loop, find all newly assigned encoders from the connectors
	 * part of the state. If the same encoder is assigned to multiple
	 * connectors bail out.
	 */
	for_each_new_connector_in_state(state, connector, new_conn_state, i) {
		const struct drm_connector_helper_funcs *funcs = connector->helper_private;
		struct drm_encoder *new_encoder;

		if (!new_conn_state->crtc)
			continue;

		if (funcs->atomic_best_encoder)
			new_encoder = funcs->atomic_best_encoder(connector,
								 state);
		else if (funcs->best_encoder)
			new_encoder = funcs->best_encoder(connector);
		else
			new_encoder = drm_connector_get_single_encoder(connector);

		if (new_encoder) {
			if (encoder_mask & drm_encoder_mask(new_encoder)) {
				drm_dbg_atomic(connector->dev,
					       "[ENCODER:%d:%s] on [CONNECTOR:%d:%s] already assigned\n",
					       new_encoder->base.id, new_encoder->name,
					       connector->base.id, connector->name);

				return -EINVAL;
			}

			encoder_mask |= drm_encoder_mask(new_encoder);
		}
	}

	if (!encoder_mask)
		return 0;

	/*
	 * Second loop, iterate over all connectors not part of the state.
	 *
	 * If a conflicting encoder is found and disable_conflicting_encoders
	 * is not set, an error is returned. Userspace can provide a solution
	 * through the atomic ioctl.
	 *
	 * If the flag is set conflicting connectors are removed from the CRTC
	 * and the CRTC is disabled if no encoder is left. This preserves
	 * compatibility with the legacy set_config behavior.
	 */
	drm_connector_list_iter_begin(state->dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_crtc_state *crtc_state;

		if (drm_atomic_get_new_connector_state(state, connector))
			continue;

		encoder = connector->state->best_encoder;
		if (!encoder || !(encoder_mask & drm_encoder_mask(encoder)))
			continue;

		if (!disable_conflicting_encoders) {
			drm_dbg_atomic(connector->dev,
				       "[ENCODER:%d:%s] in use on [CRTC:%d:%s] by [CONNECTOR:%d:%s]\n",
				       encoder->base.id, encoder->name,
				       connector->state->crtc->base.id,
				       connector->state->crtc->name,
				       connector->base.id, connector->name);
			ret = -EINVAL;
			goto out;
		}

		new_conn_state = drm_atomic_get_connector_state(state, connector);
		if (IS_ERR(new_conn_state)) {
			ret = PTR_ERR(new_conn_state);
			goto out;
		}

		drm_dbg_atomic(connector->dev,
			       "[ENCODER:%d:%s] in use on [CRTC:%d:%s], disabling [CONNECTOR:%d:%s]\n",
			       encoder->base.id, encoder->name,
			       new_conn_state->crtc->base.id, new_conn_state->crtc->name,
			       connector->base.id, connector->name);

		crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);

		ret = drm_atomic_set_crtc_for_connector(new_conn_state, NULL);
		if (ret)
			goto out;

		if (!crtc_state->connector_mask) {
			ret = drm_atomic_set_mode_prop_for_crtc(crtc_state,
								NULL);
			if (ret < 0)
				goto out;

			crtc_state->active = false;
		}
	}
out:
	drm_connector_list_iter_end(&conn_iter);

	return ret;
}

static void
set_best_encoder(struct drm_atomic_state *state,
		 struct drm_connector_state *conn_state,
		 struct drm_encoder *encoder)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;

	if (conn_state->best_encoder) {
		/* Unset the encoder_mask in the old crtc state. */
		crtc = conn_state->connector->state->crtc;

		/* A NULL crtc is an error here because we should have
		 * duplicated a NULL best_encoder when crtc was NULL.
		 * As an exception restoring duplicated atomic state
		 * during resume is allowed, so don't warn when
		 * best_encoder is equal to encoder we intend to set.
		 */
		WARN_ON(!crtc && encoder != conn_state->best_encoder);
		if (crtc) {
			crtc_state = drm_atomic_get_new_crtc_state(state, crtc);

			crtc_state->encoder_mask &=
				~drm_encoder_mask(conn_state->best_encoder);
		}
	}

	if (encoder) {
		crtc = conn_state->crtc;
		WARN_ON(!crtc);
		if (crtc) {
			crtc_state = drm_atomic_get_new_crtc_state(state, crtc);

			crtc_state->encoder_mask |=
				drm_encoder_mask(encoder);
		}
	}

	conn_state->best_encoder = encoder;
}

static void
steal_encoder(struct drm_atomic_state *state,
	      struct drm_encoder *encoder)
{
	struct drm_crtc_state *crtc_state;
	struct drm_connector *connector;
	struct drm_connector_state *old_connector_state, *new_connector_state;
	int i;

	for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
		struct drm_crtc *encoder_crtc;

		if (new_connector_state->best_encoder != encoder)
			continue;

		encoder_crtc = old_connector_state->crtc;

		drm_dbg_atomic(encoder->dev,
			       "[ENCODER:%d:%s] in use on [CRTC:%d:%s], stealing it\n",
			       encoder->base.id, encoder->name,
			       encoder_crtc->base.id, encoder_crtc->name);

		set_best_encoder(state, new_connector_state, NULL);

		crtc_state = drm_atomic_get_new_crtc_state(state, encoder_crtc);
		crtc_state->connectors_changed = true;

		return;
	}
}

static int
update_connector_routing(struct drm_atomic_state *state,
			 struct drm_connector *connector,
			 struct drm_connector_state *old_connector_state,
			 struct drm_connector_state *new_connector_state,
			 bool added_by_user)
{
	const struct drm_connector_helper_funcs *funcs;
	struct drm_encoder *new_encoder;
	struct drm_crtc_state *crtc_state;

	drm_dbg_atomic(connector->dev, "Updating routing for [CONNECTOR:%d:%s]\n",
		       connector->base.id, connector->name);

	if (old_connector_state->crtc != new_connector_state->crtc) {
		if (old_connector_state->crtc) {
			crtc_state = drm_atomic_get_new_crtc_state(state, old_connector_state->crtc);
			crtc_state->connectors_changed = true;
		}

		if (new_connector_state->crtc) {
			crtc_state = drm_atomic_get_new_crtc_state(state, new_connector_state->crtc);
			crtc_state->connectors_changed = true;
		}
	}

	if (!new_connector_state->crtc) {
		drm_dbg_atomic(connector->dev, "Disabling [CONNECTOR:%d:%s]\n",
			       connector->base.id, connector->name);

		set_best_encoder(state, new_connector_state, NULL);

		return 0;
	}

	crtc_state = drm_atomic_get_new_crtc_state(state,
						   new_connector_state->crtc);
	/*
	 * For compatibility with legacy users, we want to make sure that
	 * we allow DPMS On->Off modesets on unregistered connectors. Modesets
	 * which would result in anything else must be considered invalid, to
	 * avoid turning on new displays on dead connectors.
	 *
	 * Since the connector can be unregistered at any point during an
	 * atomic check or commit, this is racy. But that's OK: all we care
	 * about is ensuring that userspace can't do anything but shut off the
	 * display on a connector that was destroyed after it's been notified,
	 * not before.
	 *
	 * Additionally, we also want to ignore connector registration when
	 * we're trying to restore an atomic state during system resume since
	 * there's a chance the connector may have been destroyed during the
	 * process, but it's better to ignore that than to cause
	 * drm_atomic_helper_resume() to fail.
	 *
	 * Last, we want to ignore connector registration when the connector
	 * was not pulled in the atomic state by user-space (i.e., was pulled
	 * in by the driver, e.g. when updating a DP-MST stream).
	 */
	if (!state->duplicated && drm_connector_is_unregistered(connector) &&
	    added_by_user && crtc_state->active) {
		drm_dbg_atomic(connector->dev,
			       "[CONNECTOR:%d:%s] is not registered\n",
			       connector->base.id, connector->name);
		return -EINVAL;
	}

	funcs = connector->helper_private;

	if (funcs->atomic_best_encoder)
		new_encoder = funcs->atomic_best_encoder(connector, state);
	else if (funcs->best_encoder)
		new_encoder = funcs->best_encoder(connector);
	else
		new_encoder = drm_connector_get_single_encoder(connector);

	if (!new_encoder) {
		drm_dbg_atomic(connector->dev,
			       "No suitable encoder found for [CONNECTOR:%d:%s]\n",
			       connector->base.id, connector->name);
		return -EINVAL;
	}

	if (!drm_encoder_crtc_ok(new_encoder, new_connector_state->crtc)) {
		drm_dbg_atomic(connector->dev,
			       "[ENCODER:%d:%s] incompatible with [CRTC:%d:%s]\n",
			       new_encoder->base.id,
			       new_encoder->name,
			       new_connector_state->crtc->base.id,
			       new_connector_state->crtc->name);
		return -EINVAL;
	}

	if (new_encoder == new_connector_state->best_encoder) {
		set_best_encoder(state, new_connector_state, new_encoder);

		drm_dbg_atomic(connector->dev,
			       "[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d:%s]\n",
			       connector->base.id,
			       connector->name,
			       new_encoder->base.id,
			       new_encoder->name,
			       new_connector_state->crtc->base.id,
			       new_connector_state->crtc->name);

		return 0;
	}

	steal_encoder(state, new_encoder);

	set_best_encoder(state, new_connector_state, new_encoder);

	crtc_state->connectors_changed = true;

	drm_dbg_atomic(connector->dev,
		       "[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d:%s]\n",
		       connector->base.id,
		       connector->name,
		       new_encoder->base.id,
		       new_encoder->name,
		       new_connector_state->crtc->base.id,
		       new_connector_state->crtc->name);

	return 0;
}

static int
mode_fixup(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct drm_connector *connector;
	struct drm_connector_state *new_conn_state;
	int i;
	int ret;

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (!new_crtc_state->mode_changed &&
		    !new_crtc_state->connectors_changed)
			continue;

		drm_mode_copy(&new_crtc_state->adjusted_mode, &new_crtc_state->mode);
	}

	for_each_new_connector_in_state(state, connector, new_conn_state, i) {
		const struct drm_encoder_helper_funcs *funcs;
		struct drm_encoder *encoder;
		struct drm_bridge *bridge;

		WARN_ON(!!new_conn_state->best_encoder != !!new_conn_state->crtc);

		if (!new_conn_state->crtc || !new_conn_state->best_encoder)
			continue;

		new_crtc_state =
			drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);

		/*
		 * Each encoder has at most one connector (since we always steal
		 * it away), so we won't call ->mode_fixup twice.
		 */
		encoder = new_conn_state->best_encoder;
		funcs = encoder->helper_private;

		bridge = drm_bridge_chain_get_first_bridge(encoder);
		ret = drm_atomic_bridge_chain_check(bridge,
						    new_crtc_state,
						    new_conn_state);
		drm_bridge_put(bridge);
		if (ret) {
			drm_dbg_atomic(encoder->dev, "Bridge atomic check failed\n");
			return ret;
		}

		if (funcs && funcs->atomic_check) {
			ret = funcs->atomic_check(encoder, new_crtc_state,
						  new_conn_state);
			if (ret) {
				drm_dbg_atomic(encoder->dev,
					       "[ENCODER:%d:%s] check failed\n",
					       encoder->base.id, encoder->name);
				return ret;
			}
		} else if (funcs && funcs->mode_fixup) {
			ret = funcs->mode_fixup(encoder, &new_crtc_state->mode,
						&new_crtc_state->adjusted_mode);
			if (!ret) {
				drm_dbg_atomic(encoder->dev,
					       "[ENCODER:%d:%s] fixup failed\n",
					       encoder->base.id, encoder->name);
				return -EINVAL;
			}
		}
	}

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		const struct drm_crtc_helper_funcs *funcs;

		if (!new_crtc_state->enable)
			continue;

		if (!new_crtc_state->mode_changed &&
		    !new_crtc_state->connectors_changed)
			continue;

		funcs = crtc->helper_private;
		if (!funcs || !funcs->mode_fixup)
			continue;

		ret = funcs->mode_fixup(crtc, &new_crtc_state->mode,
					&new_crtc_state->adjusted_mode);
		if (!ret) {
			drm_dbg_atomic(crtc->dev, "[CRTC:%d:%s] fixup failed\n",
				       crtc->base.id, crtc->name);
			return -EINVAL;
		}
	}

	return 0;
}

static enum drm_mode_status mode_valid_path(struct drm_connector *connector,
					    struct drm_encoder *encoder,
					    struct drm_crtc *crtc,
					    const struct drm_display_mode *mode)
{
	struct drm_bridge *bridge;
	enum drm_mode_status ret;

	ret = drm_encoder_mode_valid(encoder, mode);
	if (ret != MODE_OK) {
		drm_dbg_atomic(encoder->dev,
			       "[ENCODER:%d:%s] mode_valid() failed\n",
			       encoder->base.id, encoder->name);
		return ret;
	}

	bridge = drm_bridge_chain_get_first_bridge(encoder);
	ret = drm_bridge_chain_mode_valid(bridge, &connector->display_info,
					  mode);
	drm_bridge_put(bridge);
	if (ret != MODE_OK) {
		drm_dbg_atomic(encoder->dev, "[BRIDGE] mode_valid() failed\n");
		return ret;
	}

	ret = drm_crtc_mode_valid(crtc, mode);
	if (ret != MODE_OK) {
		drm_dbg_atomic(encoder->dev, "[CRTC:%d:%s] mode_valid() failed\n",
			       crtc->base.id, crtc->name);
		return ret;
	}

	return ret;
}

static int
mode_valid(struct drm_atomic_state *state)
{
	struct drm_connector_state *conn_state;
	struct drm_connector *connector;
	int i;

	for_each_new_connector_in_state(state, connector, conn_state, i) {
		struct drm_encoder *encoder = conn_state->best_encoder;
		struct drm_crtc *crtc = conn_state->crtc;
		struct drm_crtc_state *crtc_state;
		enum drm_mode_status mode_status;
		const struct drm_display_mode *mode;

		if (!crtc || !encoder)
			continue;

		crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
		if (!crtc_state)
			continue;
		if (!crtc_state->mode_changed && !crtc_state->connectors_changed)
			continue;

		mode = &crtc_state->mode;

		mode_status = mode_valid_path(connector, encoder, crtc, mode);
		if (mode_status != MODE_OK)
			return -EINVAL;
	}

	return 0;
}

static int drm_atomic_check_valid_clones(struct drm_atomic_state *state,
					 struct drm_crtc *crtc)
{
	struct drm_encoder *drm_enc;
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
									  crtc);

	drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc_state->encoder_mask) {
		if (!drm_enc->possible_clones) {
			DRM_DEBUG("enc%d possible_clones is 0\n", drm_enc->base.id);
			continue;
		}

		if ((crtc_state->encoder_mask & drm_enc->possible_clones) !=
		    crtc_state->encoder_mask) {
			DRM_DEBUG("crtc%d failed valid clone check for mask 0x%x\n",
				  crtc->base.id, crtc_state->encoder_mask);
			return -EINVAL;
		}
	}

	return 0;
}

/**
 * drm_atomic_helper_check_modeset - validate state object for modeset changes
 * @dev: DRM device
 * @state: the driver state object
 *
 * Check the state object to see if the requested state is physically possible.
 * This does all the CRTC and connector related computations for an atomic
 * update and adds any additional connectors needed for full modesets. It calls
 * the various per-object callbacks in the following order:
 *
 * 1. &drm_connector_helper_funcs.atomic_best_encoder for determining the new encoder.
 * 2. &drm_connector_helper_funcs.atomic_check to validate the connector state.
 * 3. If it's determined a modeset is needed then all connectors on the affected
 * CRTC are added and &drm_connector_helper_funcs.atomic_check is run on them.
 * 4. &drm_encoder_helper_funcs.mode_valid, &drm_bridge_funcs.mode_valid and
 * &drm_crtc_helper_funcs.mode_valid are called on the affected components.
 * 5. &drm_bridge_funcs.mode_fixup is called on all encoder bridges.
 * 6. &drm_encoder_helper_funcs.atomic_check is called to validate any encoder state.
 * This function is only called when the encoder will be part of a configured CRTC;
 * it must not be used for implementing connector property validation.
 * If this function is NULL, &drm_encoder_helper_funcs.mode_fixup is called
 * instead.
 * 7. &drm_crtc_helper_funcs.mode_fixup is called last, to fix up the mode with CRTC constraints.
 *
 * &drm_crtc_state.mode_changed is set when the input mode is changed.
 * &drm_crtc_state.connectors_changed is set when a connector is added or
 * removed from the CRTC. &drm_crtc_state.active_changed is set when
 * &drm_crtc_state.active changes, which is used for DPMS.
 * &drm_crtc_state.no_vblank is set from the result of drm_dev_has_vblank().
 * See also: drm_atomic_crtc_needs_modeset()
 *
 * IMPORTANT:
 *
 * Drivers which set &drm_crtc_state.mode_changed (e.g. in their
 * &drm_plane_helper_funcs.atomic_check hooks if a plane update can't be done
 * without a full modeset) _must_ call this function after that change. It is
 * permitted to call this function multiple times for the same update, e.g.
 * when the &drm_crtc_helper_funcs.atomic_check functions depend upon the
 * adjusted dotclock for fifo space allocation and watermark computation.
 *
 * RETURNS:
 * Zero for success or -errno
 */
int
drm_atomic_helper_check_modeset(struct drm_device *dev,
				struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_connector *connector;
	struct drm_connector_state *old_connector_state, *new_connector_state;
	int i, ret;
	unsigned int connectors_mask = 0, user_connectors_mask = 0;

	for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i)
		user_connectors_mask |= BIT(i);

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		bool has_connectors =
			!!new_crtc_state->connector_mask;

		WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

		if (!drm_mode_equal(&old_crtc_state->mode, &new_crtc_state->mode)) {
			drm_dbg_atomic(dev, "[CRTC:%d:%s] mode changed\n",
				       crtc->base.id, crtc->name);
			new_crtc_state->mode_changed = true;
		}

		if (old_crtc_state->enable != new_crtc_state->enable) {
			drm_dbg_atomic(dev, "[CRTC:%d:%s] enable changed\n",
				       crtc->base.id, crtc->name);

			/*
			 * For clarity this assignment is done here, but
			 * enable == 0 is only true when there are no
			 * connectors and a NULL mode.
			 *
			 * The other way around is true as well. enable != 0
			 * implies that connectors are attached and a mode is set.
			 */
			new_crtc_state->mode_changed = true;
			new_crtc_state->connectors_changed = true;
		}

		if (old_crtc_state->active != new_crtc_state->active) {
			drm_dbg_atomic(dev, "[CRTC:%d:%s] active changed\n",
				       crtc->base.id, crtc->name);
			new_crtc_state->active_changed = true;
		}

		if (new_crtc_state->enable != has_connectors) {
			drm_dbg_atomic(dev, "[CRTC:%d:%s] enabled/connectors mismatch (%d/%d)\n",
				       crtc->base.id, crtc->name,
				       new_crtc_state->enable, has_connectors);

			return -EINVAL;
		}

		if (drm_dev_has_vblank(dev))
			new_crtc_state->no_vblank = false;
		else
			new_crtc_state->no_vblank = true;
	}

	ret = handle_conflicting_encoders(state, false);
	if (ret)
		return ret;

	for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
		const struct drm_connector_helper_funcs *funcs = connector->helper_private;

		WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));

		/*
		 * This only sets crtc->connectors_changed for routing changes,
		 * drivers must set crtc->connectors_changed themselves when
		 * connector properties need to be updated.
		 */
		ret = update_connector_routing(state, connector,
					       old_connector_state,
					       new_connector_state,
					       BIT(i) & user_connectors_mask);
		if (ret)
			return ret;
		if (old_connector_state->crtc) {
			new_crtc_state = drm_atomic_get_new_crtc_state(state,
								       old_connector_state->crtc);
			if (old_connector_state->link_status !=
			    new_connector_state->link_status)
				new_crtc_state->connectors_changed = true;

			if (old_connector_state->max_requested_bpc !=
			    new_connector_state->max_requested_bpc)
				new_crtc_state->connectors_changed = true;
		}

		if (funcs->atomic_check)
			ret = funcs->atomic_check(connector, state);
		if (ret) {
			drm_dbg_atomic(dev,
				       "[CONNECTOR:%d:%s] driver check failed\n",
				       connector->base.id, connector->name);
			return ret;
		}

		connectors_mask |= BIT(i);
	}

	/*
	 * After all the routing has been prepared we need to add in any
	 * connector which is itself unchanged, but whose CRTC changes its
	 * configuration. This must be done before calling mode_fixup in case a
	 * crtc only changed its mode but has the same set of connectors.
	 */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		drm_dbg_atomic(dev,
			       "[CRTC:%d:%s] needs all connectors, enable: %c, active: %c\n",
			       crtc->base.id, crtc->name,
			       new_crtc_state->enable ? 'y' : 'n',
			       new_crtc_state->active ? 'y' : 'n');

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret != 0)
			return ret;

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret != 0)
			return ret;

		ret = drm_atomic_check_valid_clones(state, crtc);
		if (ret != 0)
			return ret;
	}

	/*
	 * Iterate over all connectors again, to make sure atomic_check()
	 * has been called on them when a modeset is forced.
	 */
	for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
		const struct drm_connector_helper_funcs *funcs = connector->helper_private;

		if (connectors_mask & BIT(i))
			continue;

		if (funcs->atomic_check)
			ret = funcs->atomic_check(connector, state);
		if (ret) {
			drm_dbg_atomic(dev,
				       "[CONNECTOR:%d:%s] driver check failed\n",
				       connector->base.id, connector->name);
			return ret;
		}
	}

	/*
	 * Iterate over all connectors again, and add all affected bridges to
	 * the state.
	 */
	for_each_oldnew_connector_in_state(state, connector,
					   old_connector_state,
					   new_connector_state, i) {
		struct drm_encoder *encoder;

		encoder = old_connector_state->best_encoder;
		ret = drm_atomic_add_encoder_bridges(state, encoder);
		if (ret)
			return ret;

		encoder = new_connector_state->best_encoder;
		ret = drm_atomic_add_encoder_bridges(state, encoder);
		if (ret)
			return ret;
	}

	ret = mode_valid(state);
	if (ret)
		return ret;

	return mode_fixup(state);
}
EXPORT_SYMBOL(drm_atomic_helper_check_modeset);

/**
 * drm_atomic_helper_check_wb_connector_state() - Check writeback connector state
 * @connector: corresponding connector
 * @state: the driver state object
 *
 * Checks if the writeback connector state is valid, and returns an error if it
 * isn't.
 *
 * RETURNS:
 * Zero for success or -errno
 */
int
drm_atomic_helper_check_wb_connector_state(struct drm_connector *connector,
					   struct drm_atomic_state *state)
{
	struct drm_connector_state *conn_state =
		drm_atomic_get_new_connector_state(state, connector);
	struct drm_writeback_job *wb_job = conn_state->writeback_job;
	struct drm_property_blob *pixel_format_blob;
	struct drm_framebuffer *fb;
	size_t i, nformats;
	u32 *formats;

	if (!wb_job || !wb_job->fb)
		return 0;

	pixel_format_blob = wb_job->connector->pixel_formats_blob_ptr;
	nformats = pixel_format_blob->length / sizeof(u32);
	formats = pixel_format_blob->data;
	fb = wb_job->fb;

	for (i = 0; i < nformats; i++)
		if (fb->format->format == formats[i])
			return 0;

	drm_dbg_kms(connector->dev, "Invalid pixel format %p4cc\n", &fb->format->format);

	return -EINVAL;
}
EXPORT_SYMBOL(drm_atomic_helper_check_wb_connector_state);
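
/*
 * Example (illustrative sketch, not from a real driver): a writeback
 * connector's &drm_connector_helper_funcs.atomic_check can simply delegate
 * the framebuffer format validation to the helper above. The name
 * "foo_wb_connector_atomic_check" is hypothetical.
 *
 *	static int foo_wb_connector_atomic_check(struct drm_connector *connector,
 *						 struct drm_atomic_state *state)
 *	{
 *		return drm_atomic_helper_check_wb_connector_state(connector, state);
 *	}
 */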

/**
 * drm_atomic_helper_check_plane_state() - Check plane state for validity
 * @plane_state: plane state to check
 * @crtc_state: CRTC state to check
 * @min_scale: minimum @src:@dest scaling factor in 16.16 fixed point
 * @max_scale: maximum @src:@dest scaling factor in 16.16 fixed point
 * @can_position: is it legal to position the plane such that it
 *                doesn't cover the entire CRTC? This will generally
 *                only be false for primary planes.
 * @can_update_disabled: can the plane be updated while the CRTC
 *                       is disabled?
 *
 * Checks that a desired plane update is valid, and updates various
 * bits of derived state (clipped coordinates etc.). Drivers that provide
 * their own plane handling rather than helper-provided implementations may
 * still wish to call this function to avoid duplication of error checking
 * code.
 *
 * RETURNS:
 * Zero if update appears valid, error code on failure
 */
int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state,
					const struct drm_crtc_state *crtc_state,
					int min_scale,
					int max_scale,
					bool can_position,
					bool can_update_disabled)
{
	struct drm_framebuffer *fb = plane_state->fb;
	struct drm_rect *src = &plane_state->src;
	struct drm_rect *dst = &plane_state->dst;
	unsigned int rotation = plane_state->rotation;
	struct drm_rect clip = {};
	int hscale, vscale;

	WARN_ON(plane_state->crtc && plane_state->crtc != crtc_state->crtc);

	*src = drm_plane_state_src(plane_state);
	*dst = drm_plane_state_dest(plane_state);

	if (!fb) {
		plane_state->visible = false;
		return 0;
	}

	/* crtc should only be NULL when disabling (i.e., !fb) */
	if (WARN_ON(!plane_state->crtc)) {
		plane_state->visible = false;
		return 0;
	}

	if (!crtc_state->enable && !can_update_disabled) {
		drm_dbg_kms(plane_state->plane->dev,
			    "Cannot update plane of a disabled CRTC.\n");
		return -EINVAL;
	}

	drm_rect_rotate(src, fb->width << 16, fb->height << 16, rotation);

	/* Check scaling */
	hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale);
	vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale);
	if (hscale < 0 || vscale < 0) {
		drm_dbg_kms(plane_state->plane->dev,
			    "Invalid scaling of plane\n");
		drm_rect_debug_print("src: ", &plane_state->src, true);
		drm_rect_debug_print("dst: ", &plane_state->dst, false);
		return -ERANGE;
	}

	if (crtc_state->enable)
		drm_mode_get_hv_timing(&crtc_state->mode, &clip.x2, &clip.y2);

	plane_state->visible = drm_rect_clip_scaled(src, dst, &clip);

	drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16, rotation);

	if (!plane_state->visible)
		/*
		 * Plane isn't visible; some drivers can handle this
		 * so we just return success here. Drivers that can't
		 * (including those that use the primary plane helper's
		 * update function) will return an error from their
		 * update_plane handler.
		 */
		return 0;

	if (!can_position && !drm_rect_equals(dst, &clip)) {
		drm_dbg_kms(plane_state->plane->dev,
			    "Plane must cover entire CRTC\n");
		drm_rect_debug_print("dst: ", dst, false);
		drm_rect_debug_print("clip: ", &clip, false);
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_helper_check_plane_state);
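
/*
 * Example (illustrative sketch, not from a real driver): a typical
 * &drm_plane_helper_funcs.atomic_check implementation clips the plane against
 * its CRTC with this helper before doing any hardware-specific checks. The
 * name "foo_plane_atomic_check" is hypothetical; the scaling limits shown
 * disallow scaling entirely.
 *
 *	static int foo_plane_atomic_check(struct drm_plane *plane,
 *					  struct drm_atomic_state *state)
 *	{
 *		struct drm_plane_state *new_state =
 *			drm_atomic_get_new_plane_state(state, plane);
 *		struct drm_crtc_state *crtc_state;
 *
 *		if (!new_state->crtc)
 *			return 0;
 *
 *		crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc);
 *		if (WARN_ON(!crtc_state))
 *			return -EINVAL;
 *
 *		return drm_atomic_helper_check_plane_state(new_state, crtc_state,
 *							   DRM_PLANE_NO_SCALING,
 *							   DRM_PLANE_NO_SCALING,
 *							   false, true);
 *	}
 */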

/**
 * drm_atomic_helper_check_crtc_primary_plane() - Check CRTC state for primary plane
 * @crtc_state: CRTC state to check
 *
 * Checks that a CRTC has at least one primary plane attached to it, which is
 * a requirement on some hardware. Note that this only involves the CRTC side
 * of the test. To test if the primary plane is visible or if it can be updated
 * without the CRTC being enabled, use drm_atomic_helper_check_plane_state() in
 * the plane's atomic check.
 *
 * RETURNS:
 * 0 if a primary plane is attached to the CRTC, or an error code otherwise
 */
int drm_atomic_helper_check_crtc_primary_plane(struct drm_crtc_state *crtc_state)
{
	struct drm_crtc *crtc = crtc_state->crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_plane *plane;

	/* needs at least one primary plane to be enabled */
	drm_for_each_plane_mask(plane, dev, crtc_state->plane_mask) {
		if (plane->type == DRM_PLANE_TYPE_PRIMARY)
			return 0;
	}

	drm_dbg_atomic(dev, "[CRTC:%d:%s] primary plane missing\n", crtc->base.id, crtc->name);

	return -EINVAL;
}
EXPORT_SYMBOL(drm_atomic_helper_check_crtc_primary_plane);
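
/*
 * Example (illustrative sketch, not from a real driver): hardware that cannot
 * scan out without a primary plane can enforce that requirement from its
 * &drm_crtc_helper_funcs.atomic_check hook. The name "foo_crtc_atomic_check"
 * is hypothetical.
 *
 *	static int foo_crtc_atomic_check(struct drm_crtc *crtc,
 *					 struct drm_atomic_state *state)
 *	{
 *		struct drm_crtc_state *crtc_state =
 *			drm_atomic_get_new_crtc_state(state, crtc);
 *
 *		if (!crtc_state->enable)
 *			return 0;
 *
 *		return drm_atomic_helper_check_crtc_primary_plane(crtc_state);
 *	}
 */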

/**
 * drm_atomic_helper_check_planes - validate state object for plane changes
 * @dev: DRM device
 * @state: the driver state object
 *
 * Check the state object to see if the requested state is physically possible.
 * This does all the plane update related checks by calling into the
 * &drm_crtc_helper_funcs.atomic_check and &drm_plane_helper_funcs.atomic_check
 * hooks provided by the driver.
 *
 * It also sets &drm_crtc_state.planes_changed to indicate that a CRTC has
 * updated planes.
 *
 * RETURNS:
 * Zero for success or -errno
 */
int
drm_atomic_helper_check_planes(struct drm_device *dev,
			       struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state, *old_plane_state;
	int i, ret = 0;

	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
		const struct drm_plane_helper_funcs *funcs;

		WARN_ON(!drm_modeset_is_locked(&plane->mutex));

		funcs = plane->helper_private;

		drm_atomic_helper_plane_changed(state, old_plane_state, new_plane_state, plane);

		drm_atomic_helper_check_plane_damage(state, new_plane_state);

		if (!funcs || !funcs->atomic_check)
			continue;

		ret = funcs->atomic_check(plane, state);
		if (ret) {
			drm_dbg_atomic(plane->dev,
				       "[PLANE:%d:%s] atomic driver check failed\n",
				       plane->base.id, plane->name);
			return ret;
		}
	}

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		const struct drm_crtc_helper_funcs *funcs;

		funcs = crtc->helper_private;

		if (!funcs || !funcs->atomic_check)
			continue;

		ret = funcs->atomic_check(crtc, state);
		if (ret) {
			drm_dbg_atomic(crtc->dev,
				       "[CRTC:%d:%s] atomic driver check failed\n",
				       crtc->base.id, crtc->name);
			return ret;
		}
	}

	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_check_planes);

/**
 * drm_atomic_helper_check - validate state object
 * @dev: DRM device
 * @state: the driver state object
 *
 * Check the state object to see if the requested state is physically possible.
 * Only CRTCs and planes have check callbacks, so for any additional (global)
 * checking that a driver needs it can simply wrap that around this function.
 * Drivers without such needs can directly use this as their
 * &drm_mode_config_funcs.atomic_check callback.
 *
 * This just wraps the two parts of the state checking for planes and modeset
 * state in the default order: First it calls drm_atomic_helper_check_modeset()
 * and then drm_atomic_helper_check_planes(). The assumption is that the
 * &drm_plane_helper_funcs.atomic_check and &drm_crtc_helper_funcs.atomic_check
 * functions depend upon an updated adjusted_mode.clock to e.g. properly compute
 * watermarks.
 *
 * Note that zpos normalization will add all enabled planes to the state, which
 * might not be desired for some drivers.
 * For example, enabling or disabling a cursor plane with a fixed zpos value
 * would force all other enabled planes to be added to the state.
 *
 * IMPORTANT:
 *
 * As this function calls drm_atomic_helper_check_modeset() internally, its
 * restrictions also apply:
 * Drivers which set &drm_crtc_state.mode_changed (e.g. in their
 * &drm_plane_helper_funcs.atomic_check hooks if a plane update can't be done
 * without a full modeset) _must_ call drm_atomic_helper_check_modeset()
 * again after that change.
 *
 * RETURNS:
 * Zero for success or -errno
 */
int drm_atomic_helper_check(struct drm_device *dev,
			    struct drm_atomic_state *state)
{
	int ret;

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		return ret;

	if (dev->mode_config.normalize_zpos) {
		ret = drm_atomic_normalize_zpos(dev, state);
		if (ret)
			return ret;
	}

	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		return ret;

	if (state->legacy_cursor_update)
		state->async_update = !drm_atomic_helper_async_check(dev, state);

	drm_self_refresh_helper_alter_state(state);

	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_check);
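
/*
 * Example (illustrative sketch, not from a real driver): a driver that needs
 * extra global validation can wrap drm_atomic_helper_check() in its own
 * &drm_mode_config_funcs.atomic_check implementation, keeping the default
 * modeset/plane ordering. The names "foo_atomic_check" and
 * "foo_check_global_limits" are hypothetical.
 *
 *	static int foo_atomic_check(struct drm_device *dev,
 *				    struct drm_atomic_state *state)
 *	{
 *		int ret;
 *
 *		ret = drm_atomic_helper_check(dev, state);
 *		if (ret)
 *			return ret;
 *
 *		// driver-specific check of e.g. shared clock or bandwidth limits
 *		return foo_check_global_limits(dev, state);
 *	}
 */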

static bool
crtc_needs_disable(struct drm_crtc_state *old_state,
		   struct drm_crtc_state *new_state)
{
	/*
	 * No new_state means the CRTC is off, so the only criterion is whether
	 * it's currently active or in self refresh mode.
	 */
	if (!new_state)
		return drm_atomic_crtc_effectively_active(old_state);

	/*
	 * We need to disable bridge(s) and CRTC if we're transitioning out of
	 * self-refresh and changing CRTCs at the same time, because the
	 * bridge tracks self-refresh status via CRTC state.
	 */
	if (old_state->self_refresh_active &&
	    old_state->crtc != new_state->crtc)
		return true;

	/*
	 * We also need to run through the crtc_funcs->disable() function if
	 * the CRTC is currently on, if it's transitioning to self refresh
	 * mode, or if it's in self refresh mode and needs to be fully
	 * disabled.
	 */
	return old_state->active ||
	       (old_state->self_refresh_active && !new_state->active) ||
	       new_state->self_refresh_active;
}

static void
encoder_bridge_disable(struct drm_device *dev, struct drm_atomic_state *state)
{
	struct drm_connector *connector;
	struct drm_connector_state *old_conn_state, *new_conn_state;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	int i;

	for_each_oldnew_connector_in_state(state, connector, old_conn_state, new_conn_state, i) {
		const struct drm_encoder_helper_funcs *funcs;
		struct drm_encoder *encoder;
		struct drm_bridge *bridge;

		/*
		 * Shut down everything that's in the changeset and currently
		 * still on. So need to check the old, saved state.
		 */
		if (!old_conn_state->crtc)
			continue;

		old_crtc_state = drm_atomic_get_old_crtc_state(state, old_conn_state->crtc);

		if (new_conn_state->crtc)
			new_crtc_state = drm_atomic_get_new_crtc_state(
						state,
						new_conn_state->crtc);
		else
			new_crtc_state = NULL;

		if (!crtc_needs_disable(old_crtc_state, new_crtc_state) ||
		    !drm_atomic_crtc_needs_modeset(old_conn_state->crtc->state))
			continue;

		encoder = old_conn_state->best_encoder;

		/* We shouldn't get this far if we didn't previously have
		 * an encoder.. but WARN_ON() rather than explode.
		 */
		if (WARN_ON(!encoder))
			continue;

		funcs = encoder->helper_private;

		drm_dbg_atomic(dev, "disabling [ENCODER:%d:%s]\n",
			       encoder->base.id, encoder->name);

		/*
		 * Each encoder has at most one connector (since we always steal
		 * it away), so we won't call disable hooks twice.
		 */
		bridge = drm_bridge_chain_get_first_bridge(encoder);
		drm_atomic_bridge_chain_disable(bridge, state);
		drm_bridge_put(bridge);

		/* Right function depends upon target state. */
		if (funcs) {
			if (funcs->atomic_disable)
				funcs->atomic_disable(encoder, state);
			else if (new_conn_state->crtc && funcs->prepare)
				funcs->prepare(encoder);
			else if (funcs->disable)
				funcs->disable(encoder);
			else if (funcs->dpms)
				funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
		}
	}
}

static void
crtc_disable(struct drm_device *dev, struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	int i;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		const struct drm_crtc_helper_funcs *funcs;
		int ret;

		/* Shut down everything that needs a full modeset. */
		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		if (!crtc_needs_disable(old_crtc_state, new_crtc_state))
			continue;

		funcs = crtc->helper_private;

		drm_dbg_atomic(dev, "disabling [CRTC:%d:%s]\n",
			       crtc->base.id, crtc->name);

		/* Right function depends upon target state. */
		if (new_crtc_state->enable && funcs->prepare)
			funcs->prepare(crtc);
		else if (funcs->atomic_disable)
			funcs->atomic_disable(crtc, state);
		else if (funcs->disable)
			funcs->disable(crtc);
		else if (funcs->dpms)
			funcs->dpms(crtc, DRM_MODE_DPMS_OFF);

		if (!drm_dev_has_vblank(dev))
			continue;

		ret = drm_crtc_vblank_get(crtc);
		/*
		 * Self-refresh is not a true "disable"; ensure vblank remains
		 * enabled.
		 */
		if (new_crtc_state->self_refresh_active)
			WARN_ONCE(ret != 0,
				  "driver disabled vblank in self-refresh\n");
		else
			WARN_ONCE(ret != -EINVAL,
				  "driver forgot to call drm_crtc_vblank_off()\n");
		if (ret == 0)
			drm_crtc_vblank_put(crtc);
	}
}

static void
encoder_bridge_post_disable(struct drm_device *dev, struct drm_atomic_state *state)
{
	struct drm_connector *connector;
	struct drm_connector_state *old_conn_state, *new_conn_state;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	int i;

	for_each_oldnew_connector_in_state(state, connector, old_conn_state, new_conn_state, i) {
		struct drm_encoder *encoder;
		struct drm_bridge *bridge;

		/*
		 * Shut down everything that's in the changeset and currently
		 * still on. So need to check the old, saved state.
		 */
		if (!old_conn_state->crtc)
			continue;

		old_crtc_state = drm_atomic_get_old_crtc_state(state, old_conn_state->crtc);

		if (new_conn_state->crtc)
			new_crtc_state = drm_atomic_get_new_crtc_state(state,
								       new_conn_state->crtc);
		else
			new_crtc_state = NULL;

		if (!crtc_needs_disable(old_crtc_state, new_crtc_state) ||
		    !drm_atomic_crtc_needs_modeset(old_conn_state->crtc->state))
			continue;

		encoder = old_conn_state->best_encoder;

		/*
		 * We shouldn't get this far if we didn't previously have
		 * an encoder.. but WARN_ON() rather than explode.
		 */
		if (WARN_ON(!encoder))
			continue;

		drm_dbg_atomic(dev, "post-disabling bridges [ENCODER:%d:%s]\n",
			       encoder->base.id, encoder->name);

		/*
		 * Each encoder has at most one connector (since we always steal
		 * it away), so we won't call disable hooks twice.
		 */
		bridge = drm_bridge_chain_get_first_bridge(encoder);
		drm_atomic_bridge_chain_post_disable(bridge, state);
		drm_bridge_put(bridge);
	}
}

static void
disable_outputs(struct drm_device *dev, struct drm_atomic_state *state)
{
	encoder_bridge_disable(dev, state);

	crtc_disable(dev, state);

	encoder_bridge_post_disable(dev, state);
}

/**
 * drm_atomic_helper_update_legacy_modeset_state - update legacy modeset state
 * @dev: DRM device
 * @state: atomic state object being committed
 *
 * This function updates all the various legacy modeset state pointers in
 * connectors, encoders and CRTCs.
 *
 * Drivers can use this for building their own atomic commit if they don't have
 * a pure helper-based modeset implementation.
 *
 * Since these updates are not synchronized with locking, only code paths
 * called from &drm_mode_config_helper_funcs.atomic_commit_tail can look at the
 * legacy state filled out by this helper. De facto this means this helper and
 * the legacy state pointers are only really useful for transitioning an
 * existing driver to the atomic world.
 */
void
drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev,
					      struct drm_atomic_state *state)
{
	struct drm_connector *connector;
	struct drm_connector_state *old_conn_state, *new_conn_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	int i;

	/* clear out existing links and update dpms */
	for_each_oldnew_connector_in_state(state, connector, old_conn_state, new_conn_state, i) {
		if (connector->encoder) {
			WARN_ON(!connector->encoder->crtc);

			connector->encoder->crtc = NULL;
			connector->encoder = NULL;
		}

		crtc = new_conn_state->crtc;
		if ((!crtc && old_conn_state->crtc) ||
		    (crtc && drm_atomic_crtc_needs_modeset(crtc->state))) {
			int mode = DRM_MODE_DPMS_OFF;

			if (crtc && crtc->state->active)
				mode = DRM_MODE_DPMS_ON;

			connector->dpms = mode;
		}
	}

	/* set new links */
	for_each_new_connector_in_state(state, connector, new_conn_state, i) {
		if (!new_conn_state->crtc)
			continue;

		if (WARN_ON(!new_conn_state->best_encoder))
			continue;

		connector->encoder = new_conn_state->best_encoder;
		connector->encoder->crtc = new_conn_state->crtc;
	}

	/* set legacy state in the crtc structure */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct drm_plane *primary = crtc->primary;
		struct drm_plane_state *new_plane_state;

		crtc->mode = new_crtc_state->mode;
		crtc->enabled = new_crtc_state->enable;

		new_plane_state =
			drm_atomic_get_new_plane_state(state, primary);

		if (new_plane_state && new_plane_state->crtc == crtc) {
			crtc->x = new_plane_state->src_x >> 16;
			crtc->y = new_plane_state->src_y >> 16;
		}
	}
}
EXPORT_SYMBOL(drm_atomic_helper_update_legacy_modeset_state);

/**
 * drm_atomic_helper_calc_timestamping_constants - update vblank timestamping constants
 * @state: atomic state object
 *
 * Updates the timestamping constants used for precise vblank timestamps
 * by calling drm_calc_timestamping_constants() for all enabled crtcs in @state.
 */
void drm_atomic_helper_calc_timestamping_constants(struct drm_atomic_state *state)
{
	struct drm_crtc_state *new_crtc_state;
	struct drm_crtc *crtc;
	int i;

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->enable)
			drm_calc_timestamping_constants(crtc,
							&new_crtc_state->adjusted_mode);
	}
}
EXPORT_SYMBOL(drm_atomic_helper_calc_timestamping_constants);

static void
crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct drm_connector *connector;
	struct drm_connector_state *new_conn_state;
	int i;

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		const struct drm_crtc_helper_funcs *funcs;

		if (!new_crtc_state->mode_changed)
			continue;

		funcs = crtc->helper_private;

		if (new_crtc_state->enable && funcs->mode_set_nofb) {
			drm_dbg_atomic(dev, "modeset on [CRTC:%d:%s]\n",
				       crtc->base.id, crtc->name);

			funcs->mode_set_nofb(crtc);
		}
	}

	for_each_new_connector_in_state(state, connector, new_conn_state, i) {
		const struct drm_encoder_helper_funcs *funcs;
		struct drm_encoder *encoder;
		struct drm_display_mode *mode, *adjusted_mode;
		struct drm_bridge *bridge;

		if (!new_conn_state->best_encoder)
			continue;

		encoder = new_conn_state->best_encoder;
		funcs = encoder->helper_private;
		new_crtc_state = new_conn_state->crtc->state;
		mode = &new_crtc_state->mode;
		adjusted_mode = &new_crtc_state->adjusted_mode;

		if (!new_crtc_state->mode_changed && !new_crtc_state->connectors_changed)
			continue;

		drm_dbg_atomic(dev, "modeset on [ENCODER:%d:%s]\n",
			       encoder->base.id, encoder->name);

		/*
		 * Each encoder has at most one connector (since we always steal
		 * it away), so we won't call mode_set hooks twice.
		 */
		if (funcs && funcs->atomic_mode_set) {
			funcs->atomic_mode_set(encoder, new_crtc_state,
					       new_conn_state);
		} else if (funcs && funcs->mode_set) {
			funcs->mode_set(encoder, mode, adjusted_mode);
		}

		bridge = drm_bridge_chain_get_first_bridge(encoder);
		drm_bridge_chain_mode_set(bridge, mode, adjusted_mode);
		drm_bridge_put(bridge);
	}
}

/**
 * drm_atomic_helper_commit_modeset_disables - modeset commit to disable outputs
 * @dev: DRM device
 * @state: atomic state object being committed
 *
 * This function shuts down all the outputs that need to be shut down and
 * prepares them (if required) with the new mode.
 *
 * For compatibility with legacy CRTC helpers this should be called before
 * drm_atomic_helper_commit_planes(), which is what the default commit function
 * does. But drivers with different needs can group the modeset commits together
 * and do the plane commits at the end. This is useful for drivers doing runtime
 * PM since plane updates then only happen when the CRTC is actually enabled.
 */
void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
					       struct drm_atomic_state *state)
{
	disable_outputs(dev, state);

	drm_atomic_helper_update_legacy_modeset_state(dev, state);
	drm_atomic_helper_calc_timestamping_constants(state);

	crtc_set_mode(dev, state);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_disables);

static void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
						struct drm_atomic_state *state)
{
	struct drm_connector *connector;
	struct drm_connector_state *new_conn_state;
	int i;

	for_each_new_connector_in_state(state, connector, new_conn_state, i) {
		const struct drm_connector_helper_funcs *funcs;

		funcs = connector->helper_private;
		if (!funcs->atomic_commit)
			continue;

		if (new_conn_state->writeback_job && new_conn_state->writeback_job->fb) {
			WARN_ON(connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK);
			funcs->atomic_commit(connector, state);
		}
	}
}

static void
encoder_bridge_pre_enable(struct drm_device *dev, struct drm_atomic_state *state)
{
	struct drm_connector *connector;
	struct drm_connector_state *new_conn_state;
	int i;

	for_each_new_connector_in_state(state, connector, new_conn_state, i) {
		struct drm_encoder *encoder;
		struct drm_bridge *bridge;

		if (!new_conn_state->best_encoder)
			continue;

		if (!new_conn_state->crtc->state->active ||
		    !drm_atomic_crtc_needs_modeset(new_conn_state->crtc->state))
			continue;

		encoder = new_conn_state->best_encoder;

		drm_dbg_atomic(dev, "pre-enabling bridges [ENCODER:%d:%s]\n",
			       encoder->base.id, encoder->name);

		/*
		 * Each encoder has at most one connector (since we always steal
		 * it away), so we won't call enable hooks twice.
		 */
		bridge = drm_bridge_chain_get_first_bridge(encoder);
		drm_atomic_bridge_chain_pre_enable(bridge, state);
		drm_bridge_put(bridge);
	}
}

static void
crtc_enable(struct drm_device *dev, struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;
	struct drm_crtc_state *new_crtc_state;
	int i;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		const struct drm_crtc_helper_funcs *funcs;

		/* Need to filter out CRTCs where only planes change. */
		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		if (!new_crtc_state->active)
			continue;

		funcs = crtc->helper_private;

		if (new_crtc_state->enable) {
			drm_dbg_atomic(dev, "enabling [CRTC:%d:%s]\n",
				       crtc->base.id, crtc->name);
			if (funcs->atomic_enable)
				funcs->atomic_enable(crtc, state);
			else if (funcs->commit)
				funcs->commit(crtc);
		}
	}
}

static void
encoder_bridge_enable(struct drm_device *dev, struct drm_atomic_state *state)
{
	struct drm_connector *connector;
	struct drm_connector_state *new_conn_state;
	int i;

	for_each_new_connector_in_state(state, connector, new_conn_state, i) {
		const struct drm_encoder_helper_funcs *funcs;
		struct drm_encoder *encoder;
		struct drm_bridge *bridge;

		if (!new_conn_state->best_encoder)
			continue;

		if (!new_conn_state->crtc->state->active ||
		    !drm_atomic_crtc_needs_modeset(new_conn_state->crtc->state))
			continue;

		encoder = new_conn_state->best_encoder;
		funcs = encoder->helper_private;

		drm_dbg_atomic(dev, "enabling [ENCODER:%d:%s]\n",
			       encoder->base.id, encoder->name);

		/*
		 * Each encoder has at most one connector (since we always steal
		 * it away), so we won't call enable hooks twice.
		 */
		bridge = drm_bridge_chain_get_first_bridge(encoder);

		if (funcs) {
			if (funcs->atomic_enable)
				funcs->atomic_enable(encoder, state);
			else if (funcs->enable)
				funcs->enable(encoder);
			else if (funcs->commit)
				funcs->commit(encoder);
		}

		drm_atomic_bridge_chain_enable(bridge, state);
		drm_bridge_put(bridge);
	}
}

/**
 * drm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs
 * @dev: DRM device
 * @state: atomic state object being committed
 *
 * This function enables all the outputs with the new configuration which had to
 * be turned off for the update.
 *
 * For compatibility with legacy CRTC helpers this should be called after
 * drm_atomic_helper_commit_planes(), which is what the default commit function
 * does. But drivers with different needs can group the modeset commits together
 * and do the plane commits at the end. This is useful for drivers doing runtime
 * PM since plane updates then only happen when the CRTC is actually enabled.
 */
void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
					      struct drm_atomic_state *state)
{
	encoder_bridge_pre_enable(dev, state);

	crtc_enable(dev, state);

	encoder_bridge_enable(dev, state);

	drm_atomic_helper_commit_writebacks(dev, state);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables);
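
/*
 * Example (illustrative sketch, not from a real driver): a custom
 * &drm_mode_config_helper_funcs.atomic_commit_tail typically sequences the
 * disable, plane and enable phases the same way the default helper does. The
 * name "foo_atomic_commit_tail" is hypothetical.
 *
 *	static void foo_atomic_commit_tail(struct drm_atomic_state *state)
 *	{
 *		struct drm_device *dev = state->dev;
 *
 *		drm_atomic_helper_commit_modeset_disables(dev, state);
 *		drm_atomic_helper_commit_planes(dev, state, 0);
 *		drm_atomic_helper_commit_modeset_enables(dev, state);
 *
 *		drm_atomic_helper_commit_hw_done(state);
 *		drm_atomic_helper_wait_for_vblanks(dev, state);
 *		drm_atomic_helper_cleanup_planes(dev, state);
 *	}
 */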

/*
 * For atomic updates which touch just a single CRTC, calculate the time of the
 * next vblank, and inform all the fences of the deadline.
 */
static void set_fence_deadline(struct drm_device *dev,
			       struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	ktime_t vbltime = 0;
	int i;

	for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
		ktime_t v;

		if (drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		if (!new_crtc_state->active)
			continue;

		if (drm_crtc_next_vblank_start(crtc, &v))
			continue;

		if (!vbltime || ktime_before(v, vbltime))
			vbltime = v;
	}

	/* If no CRTCs updated, then nothing to do: */
	if (!vbltime)
		return;

	for_each_new_plane_in_state (state, plane, new_plane_state, i) {
		if (!new_plane_state->fence)
			continue;
		dma_fence_set_deadline(new_plane_state->fence, vbltime);
	}
}
1735
1736
/**
 * drm_atomic_helper_wait_for_fences - wait for fences stashed in plane state
 * @dev: DRM device
 * @state: atomic state object with old state structures
 * @pre_swap: If true, do an interruptible wait, and @state is the new state.
 *	Otherwise @state is the old state.
 *
 * For implicit sync, drivers should fish the exclusive fence out from the
 * incoming fbs and stash it in the drm_plane_state. This is called after
 * drm_atomic_helper_swap_state() so it uses the current plane state (and
 * just uses the atomic state to find the changed planes).
 *
 * Note that @pre_swap is needed since the point where we block for fences moves
 * around depending upon whether an atomic commit is blocking or
 * non-blocking. For non-blocking commit all waiting needs to happen after
 * drm_atomic_helper_swap_state() is called, but for blocking commits we want
 * to wait **before** we do anything that can't be easily rolled back. That is
 * before we call drm_atomic_helper_swap_state().
 *
 * Returns zero on success or < 0 if dma_fence_wait() fails.
 */
int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
				      struct drm_atomic_state *state,
				      bool pre_swap)
{
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	int i, ret;

	set_fence_deadline(dev, state);

	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
		if (!new_plane_state->fence)
			continue;

		WARN_ON(!new_plane_state->fb);

		/*
		 * If waiting for fences pre-swap (i.e. in a blocking commit),
		 * userspace can still interrupt the operation. Instead of
		 * blocking until the timer expires, make the wait
		 * interruptible.
		 */
		ret = dma_fence_wait(new_plane_state->fence, pre_swap);
		if (ret)
			return ret;

		dma_fence_put(new_plane_state->fence);
		new_plane_state->fence = NULL;
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_helper_wait_for_fences);

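/*
 * Example (illustrative sketch only): drivers that do not rely on the GEM
 * atomic helper can stash the implicit fence themselves from their
 * &drm_plane_helper_funcs.prepare_fb hook so that
 * drm_atomic_helper_wait_for_fences() has something to wait on. The function
 * name foo_plane_prepare_fb() is hypothetical; calling
 * drm_gem_plane_helper_prepare_fb() is the stock way to fill in
 * &drm_plane_state.fence from the framebuffer's reservation object:
 *
 *	static int foo_plane_prepare_fb(struct drm_plane *plane,
 *					struct drm_plane_state *new_state)
 *	{
 *		// pin or map driver-private resources here, then let the
 *		// GEM helper extract the implicit fences from new_state->fb
 *		return drm_gem_plane_helper_prepare_fb(plane, new_state);
 *	}
 */
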
/**
 * drm_atomic_helper_wait_for_vblanks - wait for vblank on CRTCs
 * @dev: DRM device
 * @state: atomic state object being committed
 *
 * Helper to, after atomic commit, wait for vblanks on all affected
 * CRTCs (ie. before cleaning up old framebuffers using
 * drm_atomic_helper_cleanup_planes()). It will only wait on CRTCs where the
 * framebuffers have actually changed to optimize for the legacy cursor and
 * plane update use-case.
 *
 * Drivers using the nonblocking commit tracking support initialized by calling
 * drm_atomic_helper_setup_commit() should look at
 * drm_atomic_helper_wait_for_flip_done() as an alternative.
 */
void
drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
				   struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	int i, ret;
	unsigned int crtc_mask = 0;

	/*
	 * Legacy cursor ioctls are completely unsynced, and userspace
	 * relies on that (by doing tons of cursor updates).
	 */
	if (state->legacy_cursor_update)
		return;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (!new_crtc_state->active)
			continue;

		ret = drm_crtc_vblank_get(crtc);
		if (ret != 0)
			continue;

		crtc_mask |= drm_crtc_mask(crtc);
		state->crtcs[i].last_vblank_count = drm_crtc_vblank_count(crtc);
	}

	for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
		if (!(crtc_mask & drm_crtc_mask(crtc)))
			continue;

		ret = wait_event_timeout(dev->vblank[i].queue,
					 state->crtcs[i].last_vblank_count !=
						drm_crtc_vblank_count(crtc),
					 msecs_to_jiffies(100));

		WARN(!ret, "[CRTC:%d:%s] vblank wait timed out\n",
		     crtc->base.id, crtc->name);

		drm_crtc_vblank_put(crtc);
	}
}
EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);

/**
 * drm_atomic_helper_wait_for_flip_done - wait for all page flips to be done
 * @dev: DRM device
 * @state: atomic state object being committed
 *
 * Helper to, after atomic commit, wait for page flips on all affected
 * crtcs (ie. before cleaning up old framebuffers using
 * drm_atomic_helper_cleanup_planes()). Compared to
 * drm_atomic_helper_wait_for_vblanks() this waits for the completion on all
 * CRTCs, assuming that cursor-only updates are signalling their completion
 * immediately (or using a different path).
 *
 * This requires that drivers use the nonblocking commit tracking support
 * initialized using drm_atomic_helper_setup_commit().
 */
void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev,
					  struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	int i;

	for (i = 0; i < dev->mode_config.num_crtc; i++) {
		struct drm_crtc_commit *commit = state->crtcs[i].commit;
		int ret;

		crtc = state->crtcs[i].ptr;

		if (!crtc || !commit)
			continue;

		ret = wait_for_completion_timeout(&commit->flip_done, 10 * HZ);
		if (ret == 0)
			drm_err(dev, "[CRTC:%d:%s] flip_done timed out\n",
				crtc->base.id, crtc->name);
	}

	if (state->fake_commit)
		complete_all(&state->fake_commit->flip_done);
}
EXPORT_SYMBOL(drm_atomic_helper_wait_for_flip_done);

/**
 * drm_atomic_helper_commit_tail - commit atomic update to hardware
 * @state: atomic state object being committed
 *
 * This is the default implementation for the
 * &drm_mode_config_helper_funcs.atomic_commit_tail hook, for drivers
 * that do not support runtime_pm or do not need the CRTC to be
 * enabled to perform a commit. Otherwise, see
 * drm_atomic_helper_commit_tail_rpm().
 *
 * Note that the default ordering of the various stages is chosen to match the
 * legacy modeset helper library as closely as possible.
 */
void drm_atomic_helper_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;

	drm_atomic_helper_commit_modeset_disables(dev, state);

	drm_atomic_helper_commit_planes(dev, state, 0);

	drm_atomic_helper_commit_modeset_enables(dev, state);

	drm_atomic_helper_fake_vblank(state);

	drm_atomic_helper_commit_hw_done(state);

	drm_atomic_helper_wait_for_vblanks(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_tail);

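/*
 * Example (sketch, driver functions hypothetical): a driver that needs an
 * extra step between the individual phases, say reprogramming a shared clock
 * after the old outputs have been shut down, can provide its own
 * &drm_mode_config_helper_funcs.atomic_commit_tail modelled on the default:
 *
 *	static void foo_atomic_commit_tail(struct drm_atomic_state *state)
 *	{
 *		struct drm_device *dev = state->dev;
 *
 *		drm_atomic_helper_commit_modeset_disables(dev, state);
 *		foo_reprogram_shared_clock(state);
 *		drm_atomic_helper_commit_planes(dev, state, 0);
 *		drm_atomic_helper_commit_modeset_enables(dev, state);
 *		drm_atomic_helper_fake_vblank(state);
 *		drm_atomic_helper_commit_hw_done(state);
 *		drm_atomic_helper_wait_for_vblanks(dev, state);
 *		drm_atomic_helper_cleanup_planes(dev, state);
 *	}
 */
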
/**
 * drm_atomic_helper_commit_tail_rpm - commit atomic update to hardware
 * @state: new modeset state to be committed
 *
 * This is an alternative implementation for the
 * &drm_mode_config_helper_funcs.atomic_commit_tail hook, for drivers
 * that support runtime_pm or need the CRTC to be enabled to perform a
 * commit. Otherwise, one should use the default implementation
 * drm_atomic_helper_commit_tail().
 */
void drm_atomic_helper_commit_tail_rpm(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;

	drm_atomic_helper_commit_modeset_disables(dev, state);

	drm_atomic_helper_commit_modeset_enables(dev, state);

	drm_atomic_helper_commit_planes(dev, state,
					DRM_PLANE_COMMIT_ACTIVE_ONLY);

	drm_atomic_helper_fake_vblank(state);

	drm_atomic_helper_commit_hw_done(state);

	drm_atomic_helper_wait_for_vblanks(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_tail_rpm);

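/*
 * Example (sketch): selecting the runtime-PM friendly variant only requires
 * pointing &drm_mode_config_helper_funcs.atomic_commit_tail at it; the foo_*
 * names are hypothetical:
 *
 *	static const struct drm_mode_config_helper_funcs foo_mode_config_helpers = {
 *		.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
 *	};
 *
 *	// during driver initialization, once the mode config is set up:
 *	foo->drm.mode_config.helper_private = &foo_mode_config_helpers;
 */
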
static void commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	const struct drm_mode_config_helper_funcs *funcs;
	struct drm_crtc_state *new_crtc_state;
	struct drm_crtc *crtc;
	ktime_t start;
	s64 commit_time_ms;
	unsigned int i, new_self_refresh_mask = 0;

	funcs = dev->mode_config.helper_private;

	/*
	 * We're measuring the _entire_ commit, so the time will vary depending
	 * on how many fences and objects are involved. For the purposes of self
	 * refresh, this is desirable since it'll give us an idea of how
	 * congested things are. This will inform our decision on how often we
	 * should enter self refresh after idle.
	 *
	 * These times will be averaged out in the self refresh helpers to avoid
	 * overreacting to one outlier frame.
	 */
	start = ktime_get();

	drm_atomic_helper_wait_for_fences(dev, state, false);

	drm_atomic_helper_wait_for_dependencies(state);

	/*
	 * We cannot safely access new_crtc_state after
	 * drm_atomic_helper_commit_hw_done() so figure out which crtc's have
	 * self-refresh active beforehand:
	 */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
		if (new_crtc_state->self_refresh_active)
			new_self_refresh_mask |= BIT(i);

	if (funcs && funcs->atomic_commit_tail)
		funcs->atomic_commit_tail(state);
	else
		drm_atomic_helper_commit_tail(state);

	commit_time_ms = ktime_ms_delta(ktime_get(), start);
	if (commit_time_ms > 0)
		drm_self_refresh_helper_update_avg_times(state,
						(unsigned long)commit_time_ms,
						new_self_refresh_mask);

	drm_atomic_helper_commit_cleanup_done(state);

	drm_atomic_state_put(state);
}

static void commit_work(struct work_struct *work)
{
	struct drm_atomic_state *state = container_of(work,
						      struct drm_atomic_state,
						      commit_work);
	commit_tail(state);
}

/**
 * drm_atomic_helper_async_check - check if state can be committed asynchronously
 * @dev: DRM device
 * @state: the driver state object
 *
 * This helper will check if it is possible to commit the state asynchronously.
 * Async commits are not supposed to swap the states like normal sync commits
 * but just do in-place changes on the current state.
 *
 * It will return 0 if the commit can happen in an asynchronous fashion or an
 * error code if not. Note that an error just means the state can't be
 * committed asynchronously; if it fails, the commit should be treated like a
 * normal synchronous commit.
 */
int drm_atomic_helper_async_check(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_plane *plane = NULL;
	struct drm_plane_state *old_plane_state = NULL;
	struct drm_plane_state *new_plane_state = NULL;
	const struct drm_plane_helper_funcs *funcs;
	int i, ret, n_planes = 0;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		if (drm_atomic_crtc_needs_modeset(crtc_state))
			return -EINVAL;
	}

	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
		n_planes++;

	/* FIXME: we support only single plane updates for now */
	if (n_planes != 1) {
		drm_dbg_atomic(dev,
			       "only single plane async updates are supported\n");
		return -EINVAL;
	}

	if (!new_plane_state->crtc ||
	    old_plane_state->crtc != new_plane_state->crtc) {
		drm_dbg_atomic(dev,
			       "[PLANE:%d:%s] async update cannot change CRTC\n",
			       plane->base.id, plane->name);
		return -EINVAL;
	}

	funcs = plane->helper_private;
	if (!funcs->atomic_async_update) {
		drm_dbg_atomic(dev,
			       "[PLANE:%d:%s] driver does not support async updates\n",
			       plane->base.id, plane->name);
		return -EINVAL;
	}

	if (new_plane_state->fence) {
		drm_dbg_atomic(dev,
			       "[PLANE:%d:%s] async update cannot wait for a fence\n",
			       plane->base.id, plane->name);
		return -EINVAL;
	}

	/*
	 * Don't do an async update if there is an outstanding commit modifying
	 * the plane. This prevents our async update's changes from getting
	 * overridden by a previous synchronous update's state.
	 */
	if (old_plane_state->commit &&
	    !try_wait_for_completion(&old_plane_state->commit->hw_done)) {
		drm_dbg_atomic(dev,
			       "[PLANE:%d:%s] inflight previous commit preventing async commit\n",
			       plane->base.id, plane->name);
		return -EBUSY;
	}

	ret = funcs->atomic_async_check(plane, state, false);
	if (ret != 0)
		drm_dbg_atomic(dev,
			       "[PLANE:%d:%s] driver async check failed\n",
			       plane->base.id, plane->name);
	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_async_check);

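/*
 * Example (sketch): a driver opts into async (cursor-style) updates by
 * implementing both async hooks in its &drm_plane_helper_funcs; the foo_*
 * functions are hypothetical driver code:
 *
 *	static const struct drm_plane_helper_funcs foo_cursor_helper_funcs = {
 *		.atomic_check = foo_cursor_atomic_check,
 *		.atomic_update = foo_cursor_atomic_update,
 *		.atomic_async_check = foo_cursor_atomic_async_check,
 *		.atomic_async_update = foo_cursor_atomic_async_update,
 *	};
 *
 * The async_update hook must apply the new state to plane->state in place,
 * which is what the WARN_ON_ONCE() checks in
 * drm_atomic_helper_async_commit() below verify.
 */
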
/**
 * drm_atomic_helper_async_commit - commit state asynchronously
 * @dev: DRM device
 * @state: the driver state object
 *
 * This function commits a state asynchronously, i.e., not vblank
 * synchronized. It should be used on a state only when
 * drm_atomic_helper_async_check() succeeds. Async commits are not supposed to
 * swap the states like normal sync commits, but just do in-place changes on
 * the current state.
 *
 * TODO: Implement full swap instead of doing in-place changes.
 */
void drm_atomic_helper_async_commit(struct drm_device *dev,
				    struct drm_atomic_state *state)
{
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	const struct drm_plane_helper_funcs *funcs;
	int i;

	for_each_new_plane_in_state(state, plane, plane_state, i) {
		struct drm_framebuffer *new_fb = plane_state->fb;
		struct drm_framebuffer *old_fb = plane->state->fb;

		funcs = plane->helper_private;
		funcs->atomic_async_update(plane, state);

		/*
		 * ->atomic_async_update() is supposed to update the
		 * plane->state in-place, make sure at least common
		 * properties have been properly updated.
		 */
		WARN_ON_ONCE(plane->state->fb != new_fb);
		WARN_ON_ONCE(plane->state->crtc_x != plane_state->crtc_x);
		WARN_ON_ONCE(plane->state->crtc_y != plane_state->crtc_y);
		WARN_ON_ONCE(plane->state->src_x != plane_state->src_x);
		WARN_ON_ONCE(plane->state->src_y != plane_state->src_y);

		/*
		 * Make sure the FBs have been swapped so that cleanups in the
		 * new_state performs a cleanup in the old FB.
		 */
		WARN_ON_ONCE(plane_state->fb != old_fb);
	}
}
EXPORT_SYMBOL(drm_atomic_helper_async_commit);

/**
 * drm_atomic_helper_commit - commit validated state object
 * @dev: DRM device
 * @state: the driver state object
 * @nonblock: whether nonblocking behavior is requested.
 *
 * This function commits a state object that has been pre-validated with
 * drm_atomic_helper_check(). This can still fail when e.g. the framebuffer
 * reservation fails. This function implements nonblocking commits, using
 * drm_atomic_helper_setup_commit() and related functions.
 *
 * Committing the actual hardware state is done through the
 * &drm_mode_config_helper_funcs.atomic_commit_tail callback, or its default
 * implementation drm_atomic_helper_commit_tail().
 *
 * RETURNS:
 * Zero for success or -errno.
 */
int drm_atomic_helper_commit(struct drm_device *dev,
			     struct drm_atomic_state *state,
			     bool nonblock)
{
	int ret;

	if (state->async_update) {
		ret = drm_atomic_helper_prepare_planes(dev, state);
		if (ret)
			return ret;

		drm_atomic_helper_async_commit(dev, state);
		drm_atomic_helper_unprepare_planes(dev, state);

		return 0;
	}

	ret = drm_atomic_helper_setup_commit(state, nonblock);
	if (ret)
		return ret;

	INIT_WORK(&state->commit_work, commit_work);

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret)
		return ret;

	if (!nonblock) {
		ret = drm_atomic_helper_wait_for_fences(dev, state, true);
		if (ret)
			goto err;
	}

	/*
	 * This is the point of no return - everything below never fails except
	 * when the hw goes bonghits. Which means we can commit the new state on
	 * the software side now.
	 */

	ret = drm_atomic_helper_swap_state(state, true);
	if (ret)
		goto err;

	/*
	 * Everything below can be run asynchronously without the need to grab
	 * any modeset locks at all under one condition: It must be guaranteed
	 * that the asynchronous work has either been cancelled (if the driver
	 * supports it, which at least requires that the framebuffers get
	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
	 * before the new state gets committed on the software side with
	 * drm_atomic_helper_swap_state().
	 *
	 * This scheme allows new atomic state updates to be prepared and
	 * checked in parallel to the asynchronous completion of the previous
	 * update. Which is important since compositors need to figure out the
	 * composition of the next frame right after having submitted the
	 * current layout.
	 *
	 * NOTE: Commit work has multiple phases, first hardware commit, then
	 * cleanup. We want them to overlap, hence need system_unbound_wq to
	 * make sure work items don't artificially stall on each other.
	 */

	drm_atomic_state_get(state);
	if (nonblock)
		queue_work(system_unbound_wq, &state->commit_work);
	else
		commit_tail(state);

	return 0;

err:
	drm_atomic_helper_unprepare_planes(dev, state);
	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_commit);

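/*
 * Example (sketch): drivers that fully rely on the atomic helpers typically
 * wire the check and commit entry points in &drm_mode_config_funcs as below;
 * drm_gem_fb_create() is just one possible .fb_create implementation and is
 * shown for completeness:
 *
 *	static const struct drm_mode_config_funcs foo_mode_config_funcs = {
 *		.fb_create = drm_gem_fb_create,
 *		.atomic_check = drm_atomic_helper_check,
 *		.atomic_commit = drm_atomic_helper_commit,
 *	};
 */
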
/**
 * DOC: implementing nonblocking commit
 *
 * Nonblocking atomic commits should use struct &drm_crtc_commit to sequence
 * different operations against each other. Locks, especially struct
 * &drm_modeset_lock, should not be held in worker threads or any other
 * asynchronous context used to commit the hardware state.
 *
 * drm_atomic_helper_commit() implements the recommended sequence for
 * nonblocking commits, using drm_atomic_helper_setup_commit() internally:
 *
 * 1. Run drm_atomic_helper_prepare_planes(). Since this can fail and we
 * need to propagate out of memory/VRAM errors to userspace, it must be called
 * synchronously.
 *
 * 2. Synchronize with any outstanding nonblocking commit worker threads which
 * might be affected by the new state update. This is handled by
 * drm_atomic_helper_setup_commit().
 *
 * Asynchronous workers need to have sufficient parallelism to be able to run
 * different atomic commits on different CRTCs in parallel. The simplest way to
 * achieve this is by running them on the &system_unbound_wq work queue. Note
 * that drivers are not required to split up atomic commits and run an
 * individual commit in parallel - userspace is supposed to do that if it cares.
 * But it might be beneficial to do that for modesets, since those necessarily
 * must be done as one global operation, and enabling or disabling a CRTC can
 * take a long time. But even that is not required.
 *
 * IMPORTANT: A &drm_atomic_state update for multiple CRTCs is sequenced
 * against all CRTCs therein. Therefore for atomic state updates which only flip
 * planes the driver must not get the struct &drm_crtc_state of unrelated CRTCs
 * in its atomic check code: This would prevent committing of atomic updates to
 * multiple CRTCs in parallel. In general, adding additional state structures
 * should be avoided as much as possible, because this reduces parallelism in
 * (nonblocking) commits, both due to locking and due to commit sequencing
 * requirements.
 *
 * 3. The software state is updated synchronously with
 * drm_atomic_helper_swap_state(). Doing this under the protection of all modeset
 * locks means concurrent callers never see inconsistent state. Note that commit
 * workers do not hold any locks; their access is only coordinated through
 * ordering. If workers would access state only through the pointers in the
 * free-standing state objects (currently not the case for any driver) then even
 * multiple pending commits could be in-flight at the same time.
 *
 * 4. Schedule a work item to do all subsequent steps, using the split-out
 * commit helpers: a) pre-plane commit b) plane commit c) post-plane commit and
 * then cleaning up the framebuffers after the old framebuffer is no longer
 * being displayed. The scheduled work should synchronize against other workers
 * using the &drm_crtc_commit infrastructure as needed. See
 * drm_atomic_helper_setup_commit() for more details.
 */

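/*
 * A rough sketch of a driver-provided &drm_mode_config_funcs.atomic_commit
 * following the sequence above; it intentionally mirrors what
 * drm_atomic_helper_commit() already does. Error unwinding and the blocking
 * fence wait are elided, and foo_commit_work()/foo_commit_tail() are
 * hypothetical driver functions:
 *
 *	static int foo_atomic_commit(struct drm_device *dev,
 *				     struct drm_atomic_state *state,
 *				     bool nonblock)
 *	{
 *		int ret;
 *
 *		ret = drm_atomic_helper_setup_commit(state, nonblock);
 *		if (ret)
 *			return ret;
 *
 *		INIT_WORK(&state->commit_work, foo_commit_work);
 *
 *		ret = drm_atomic_helper_prepare_planes(dev, state);
 *		if (ret)
 *			return ret;
 *
 *		ret = drm_atomic_helper_swap_state(state, true);
 *		if (ret) {
 *			drm_atomic_helper_unprepare_planes(dev, state);
 *			return ret;
 *		}
 *
 *		drm_atomic_state_get(state);
 *		if (nonblock)
 *			queue_work(system_unbound_wq, &state->commit_work);
 *		else
 *			foo_commit_tail(state);
 *
 *		return 0;
 *	}
 */
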
static int stall_checks(struct drm_crtc *crtc, bool nonblock)
{
	struct drm_crtc_commit *commit, *stall_commit = NULL;
	bool completed = true;
	int i;
	long ret = 0;

	spin_lock(&crtc->commit_lock);
	i = 0;
	list_for_each_entry(commit, &crtc->commit_list, commit_entry) {
		if (i == 0) {
			completed = try_wait_for_completion(&commit->flip_done);
			/*
			 * Userspace is not allowed to get ahead of the previous
			 * commit with nonblocking ones.
			 */
			if (!completed && nonblock) {
				spin_unlock(&crtc->commit_lock);
				drm_dbg_atomic(crtc->dev,
					       "[CRTC:%d:%s] busy with a previous commit\n",
					       crtc->base.id, crtc->name);

				return -EBUSY;
			}
		} else if (i == 1) {
			stall_commit = drm_crtc_commit_get(commit);
			break;
		}

		i++;
	}
	spin_unlock(&crtc->commit_lock);

	if (!stall_commit)
		return 0;

	/* We don't want to let commits get ahead of cleanup work too much,
	 * stalling on 2nd previous commit means triple-buffer won't ever stall.
	 */
	ret = wait_for_completion_interruptible_timeout(&stall_commit->cleanup_done,
							10*HZ);
	if (ret == 0)
		drm_err(crtc->dev, "[CRTC:%d:%s] cleanup_done timed out\n",
			crtc->base.id, crtc->name);

	drm_crtc_commit_put(stall_commit);

	return ret < 0 ? ret : 0;
}

static void release_crtc_commit(struct completion *completion)
{
	struct drm_crtc_commit *commit = container_of(completion,
						      typeof(*commit),
						      flip_done);

	drm_crtc_commit_put(commit);
}

static void init_commit(struct drm_crtc_commit *commit, struct drm_crtc *crtc)
{
	init_completion(&commit->flip_done);
	init_completion(&commit->hw_done);
	init_completion(&commit->cleanup_done);
	INIT_LIST_HEAD(&commit->commit_entry);
	kref_init(&commit->ref);
	commit->crtc = crtc;
}

static struct drm_crtc_commit *
crtc_or_fake_commit(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
	if (crtc) {
		struct drm_crtc_state *new_crtc_state;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);

		return new_crtc_state->commit;
	}

	if (!state->fake_commit) {
		state->fake_commit = kzalloc(sizeof(*state->fake_commit), GFP_KERNEL);
		if (!state->fake_commit)
			return NULL;

		init_commit(state->fake_commit, NULL);
	}

	return state->fake_commit;
}

/**
 * drm_atomic_helper_setup_commit - setup possibly nonblocking commit
 * @state: new modeset state to be committed
 * @nonblock: whether nonblocking behavior is requested.
 *
 * This function prepares @state to be used by the atomic helper's support for
 * nonblocking commits. Drivers using the nonblocking commit infrastructure
 * should always call this function from their
 * &drm_mode_config_funcs.atomic_commit hook.
 *
 * Drivers that need to extend the commit setup to private objects can use the
 * &drm_mode_config_helper_funcs.atomic_commit_setup hook.
 *
 * To be able to use this support drivers need to use a few more helper
 * functions. drm_atomic_helper_wait_for_dependencies() must be called before
 * actually committing the hardware state, and for nonblocking commits this call
 * must be placed in the async worker. See also drm_atomic_helper_swap_state()
 * and its stall parameter, for when a driver's commit hooks look at the
 * &drm_crtc.state, &drm_plane.state or &drm_connector.state pointer directly.
 *
 * Completion of the hardware commit step must be signalled using
 * drm_atomic_helper_commit_hw_done(). After this step the driver is not allowed
 * to read or change any permanent software or hardware modeset state. The only
 * exception is state protected by other means than &drm_modeset_lock locks.
 * Only the free standing @state with pointers to the old state structures can
 * be inspected, e.g. to clean up old buffers using
 * drm_atomic_helper_cleanup_planes().
 *
 * At the very end, before cleaning up @state drivers must call
 * drm_atomic_helper_commit_cleanup_done().
 *
 * This is all implemented in drm_atomic_helper_commit(), giving drivers a
 * complete and easy-to-use default implementation of the atomic_commit() hook.
 *
 * The tracking of asynchronously executed and still pending commits is done
 * using the core structure &drm_crtc_commit.
 *
 * By default there's no need to clean up resources allocated by this function
 * explicitly: drm_atomic_state_default_clear() will take care of that
 * automatically.
 *
 * Returns:
 * 0 on success. -EBUSY when userspace schedules nonblocking commits too fast,
 * -ENOMEM on allocation failures and -EINTR when a signal is pending.
 */
int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
				   bool nonblock)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_connector *conn;
	struct drm_connector_state *old_conn_state, *new_conn_state;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct drm_crtc_commit *commit;
	const struct drm_mode_config_helper_funcs *funcs;
	int i, ret;

	funcs = state->dev->mode_config.helper_private;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		commit = kzalloc(sizeof(*commit), GFP_KERNEL);
		if (!commit)
			return -ENOMEM;

		init_commit(commit, crtc);

		new_crtc_state->commit = commit;

		ret = stall_checks(crtc, nonblock);
		if (ret)
			return ret;

		/*
		 * Drivers only send out events when at least either current or
		 * new CRTC state is active. Complete right away if everything
		 * stays off.
		 */
		if (!old_crtc_state->active && !new_crtc_state->active) {
			complete_all(&commit->flip_done);
			continue;
		}

		/* Legacy cursor updates are fully unsynced. */
		if (state->legacy_cursor_update) {
			complete_all(&commit->flip_done);
			continue;
		}

		if (!new_crtc_state->event) {
			commit->event = kzalloc(sizeof(*commit->event),
						GFP_KERNEL);
			if (!commit->event)
				return -ENOMEM;

			new_crtc_state->event = commit->event;
		}

		new_crtc_state->event->base.completion = &commit->flip_done;
		new_crtc_state->event->base.completion_release = release_crtc_commit;
		drm_crtc_commit_get(commit);

		commit->abort_completion = true;

		state->crtcs[i].commit = commit;
		drm_crtc_commit_get(commit);
	}

	for_each_oldnew_connector_in_state(state, conn, old_conn_state, new_conn_state, i) {
		/*
		 * Userspace is not allowed to get ahead of the previous
		 * commit with nonblocking ones.
		 */
		if (nonblock && old_conn_state->commit &&
		    !try_wait_for_completion(&old_conn_state->commit->flip_done)) {
			drm_dbg_atomic(conn->dev,
				       "[CONNECTOR:%d:%s] busy with a previous commit\n",
				       conn->base.id, conn->name);

			return -EBUSY;
		}

		/* Always track connectors explicitly for e.g. link retraining. */
		commit = crtc_or_fake_commit(state, new_conn_state->crtc ?: old_conn_state->crtc);
		if (!commit)
			return -ENOMEM;

		new_conn_state->commit = drm_crtc_commit_get(commit);
	}

	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
		/*
		 * Userspace is not allowed to get ahead of the previous
		 * commit with nonblocking ones.
		 */
		if (nonblock && old_plane_state->commit &&
		    !try_wait_for_completion(&old_plane_state->commit->flip_done)) {
			drm_dbg_atomic(plane->dev,
				       "[PLANE:%d:%s] busy with a previous commit\n",
				       plane->base.id, plane->name);

			return -EBUSY;
		}

		/* Always track planes explicitly for async pageflip support. */
		commit = crtc_or_fake_commit(state, new_plane_state->crtc ?: old_plane_state->crtc);
		if (!commit)
			return -ENOMEM;

		new_plane_state->commit = drm_crtc_commit_get(commit);
	}

	if (funcs && funcs->atomic_commit_setup)
		return funcs->atomic_commit_setup(state);

	return 0;
}
EXPORT_SYMBOL(drm_atomic_helper_setup_commit);

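/*
 * Example (sketch): a driver that wants to attach additional tracking to each
 * commit, e.g. for a driver-private object, can do so from the optional
 * &drm_mode_config_helper_funcs.atomic_commit_setup hook, which this function
 * calls last. foo_atomic_commit_setup() is a hypothetical driver function:
 *
 *	static const struct drm_mode_config_helper_funcs foo_mode_config_helpers = {
 *		.atomic_commit_setup = foo_atomic_commit_setup,
 *		.atomic_commit_tail = drm_atomic_helper_commit_tail,
 *	};
 */
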
/**
 * drm_atomic_helper_wait_for_dependencies - wait for required preceding commits
 * @state: atomic state object being committed
 *
 * This function waits for all preceding commits that touch the same CRTC as
 * @state to both be committed to the hardware (as signalled by
 * drm_atomic_helper_commit_hw_done()) and executed by the hardware (as signalled
 * by calling drm_crtc_send_vblank_event() on the &drm_crtc_state.event).
 *
 * This is part of the atomic helper support for nonblocking commits, see
 * drm_atomic_helper_setup_commit() for an overview.
 */
void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state;
	struct drm_connector *conn;
	struct drm_connector_state *old_conn_state;
	int i;
	long ret;

	for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
		ret = drm_crtc_commit_wait(old_crtc_state->commit);
		if (ret)
			drm_err(crtc->dev,
				"[CRTC:%d:%s] commit wait timed out\n",
				crtc->base.id, crtc->name);
	}

	for_each_old_connector_in_state(state, conn, old_conn_state, i) {
		ret = drm_crtc_commit_wait(old_conn_state->commit);
		if (ret)
			drm_err(conn->dev,
				"[CONNECTOR:%d:%s] commit wait timed out\n",
				conn->base.id, conn->name);
	}

	for_each_old_plane_in_state(state, plane, old_plane_state, i) {
		ret = drm_crtc_commit_wait(old_plane_state->commit);
		if (ret)
			drm_err(plane->dev,
				"[PLANE:%d:%s] commit wait timed out\n",
				plane->base.id, plane->name);
	}
}
EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies);

/**
 * drm_atomic_helper_fake_vblank - fake VBLANK events if needed
 * @state: atomic state object being committed
 *
 * This function walks all CRTCs and fakes VBLANK events on those with
 * &drm_crtc_state.no_vblank set to true and &drm_crtc_state.event != NULL.
 * The primary use of this function is writeback connectors working in oneshot
 * mode and faking VBLANK events. In this case they only fake the VBLANK event
 * when a job is queued, and any change to the pipeline that does not touch the
 * connector would lead to timeouts when calling
 * drm_atomic_helper_wait_for_vblanks() or
 * drm_atomic_helper_wait_for_flip_done(). In addition to writeback
 * connectors, this function can also fake VBLANK events for CRTCs without
 * a VBLANK interrupt.
 *
 * This is part of the atomic helper support for nonblocking commits, see
 * drm_atomic_helper_setup_commit() for an overview.
 */
void drm_atomic_helper_fake_vblank(struct drm_atomic_state *state)
{
	struct drm_crtc_state *new_crtc_state;
	struct drm_crtc *crtc;
	int i;

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		unsigned long flags;

		if (!new_crtc_state->no_vblank)
			continue;

		spin_lock_irqsave(&state->dev->event_lock, flags);
		if (new_crtc_state->event) {
			drm_crtc_send_vblank_event(crtc,
						   new_crtc_state->event);
			new_crtc_state->event = NULL;
		}
		spin_unlock_irqrestore(&state->dev->event_lock, flags);
	}
}
EXPORT_SYMBOL(drm_atomic_helper_fake_vblank);

/**
 * drm_atomic_helper_commit_hw_done - signal completion of the hardware commit step
 * @state: atomic state object being committed
 *
 * This function is used to signal completion of the hardware commit step. After
 * this step the driver is not allowed to read or change any permanent software
 * or hardware modeset state. The only exception is state protected by other
 * means than &drm_modeset_lock locks.
 *
 * Drivers should try to postpone any expensive or delayed cleanup work until
 * after this function is called.
 *
 * This is part of the atomic helper support for nonblocking commits, see
 * drm_atomic_helper_setup_commit() for an overview.
 */
void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_crtc_commit *commit;
	int i;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		commit = new_crtc_state->commit;
		if (!commit)
			continue;

		/*
		 * copy new_crtc_state->commit to old_crtc_state->commit,
		 * it's unsafe to touch new_crtc_state after hw_done,
		 * but we still need to do so in cleanup_done().
		 */
		if (old_crtc_state->commit)
			drm_crtc_commit_put(old_crtc_state->commit);

		old_crtc_state->commit = drm_crtc_commit_get(commit);

		/* backend must have consumed any event by now */
		WARN_ON(new_crtc_state->event);
		complete_all(&commit->hw_done);
	}

	if (state->fake_commit) {
		complete_all(&state->fake_commit->hw_done);
		complete_all(&state->fake_commit->flip_done);
	}
}
EXPORT_SYMBOL(drm_atomic_helper_commit_hw_done);

/**
 * drm_atomic_helper_commit_cleanup_done - signal completion of commit
 * @state: atomic state object being committed
 *
 * This signals completion of the atomic update @state, including any
 * cleanup work. If used, it must be called right before calling
 * drm_atomic_state_put().
 *
 * This is part of the atomic helper support for nonblocking commits, see
 * drm_atomic_helper_setup_commit() for an overview.
 */
void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;
	struct drm_crtc_commit *commit;
	int i;

	for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
		commit = old_crtc_state->commit;
		if (WARN_ON(!commit))
			continue;

		complete_all(&commit->cleanup_done);
		WARN_ON(!try_wait_for_completion(&commit->hw_done));

		spin_lock(&crtc->commit_lock);
		list_del(&commit->commit_entry);
		spin_unlock(&crtc->commit_lock);
	}

	if (state->fake_commit) {
		complete_all(&state->fake_commit->cleanup_done);
		WARN_ON(!try_wait_for_completion(&state->fake_commit->hw_done));
	}
}
EXPORT_SYMBOL(drm_atomic_helper_commit_cleanup_done);

/**
 * drm_atomic_helper_prepare_planes - prepare plane resources before commit
 * @dev: DRM device
 * @state: atomic state object with new state structures
 *
 * This function prepares plane state, specifically framebuffers, for the new
 * configuration, by calling &drm_plane_helper_funcs.prepare_fb. If any failure
 * is encountered this function will call &drm_plane_helper_funcs.cleanup_fb on
 * any already successfully prepared framebuffer.
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
int drm_atomic_helper_prepare_planes(struct drm_device *dev,
				     struct drm_atomic_state *state)
{
	struct drm_connector *connector;
	struct drm_connector_state *new_conn_state;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	int ret, i, j;

	for_each_new_connector_in_state(state, connector, new_conn_state, i) {
		if (!new_conn_state->writeback_job)
			continue;

		ret = drm_writeback_prepare_job(new_conn_state->writeback_job);
		if (ret < 0)
			return ret;
	}

	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
		const struct drm_plane_helper_funcs *funcs;

		funcs = plane->helper_private;

		if (funcs->prepare_fb) {
			ret = funcs->prepare_fb(plane, new_plane_state);
			if (ret)
				goto fail_prepare_fb;
		} else {
			WARN_ON_ONCE(funcs->cleanup_fb);

			if (!drm_core_check_feature(dev, DRIVER_GEM))
				continue;

			ret = drm_gem_plane_helper_prepare_fb(plane, new_plane_state);
			if (ret)
				goto fail_prepare_fb;
		}
	}

	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
		const struct drm_plane_helper_funcs *funcs = plane->helper_private;

		if (funcs->begin_fb_access) {
			ret = funcs->begin_fb_access(plane, new_plane_state);
			if (ret)
				goto fail_begin_fb_access;
		}
	}

	return 0;

fail_begin_fb_access:
	for_each_new_plane_in_state(state, plane, new_plane_state, j) {
		const struct drm_plane_helper_funcs *funcs = plane->helper_private;

		if (j >= i)
			continue;

		if (funcs->end_fb_access)
			funcs->end_fb_access(plane, new_plane_state);
	}
	i = j; /* set i to upper limit to cleanup all planes */
fail_prepare_fb:
	for_each_new_plane_in_state(state, plane, new_plane_state, j) {
		const struct drm_plane_helper_funcs *funcs;

		if (j >= i)
			continue;

		funcs = plane->helper_private;

		if (funcs->cleanup_fb)
			funcs->cleanup_fb(plane, new_plane_state);
	}

	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_prepare_planes);

/**
 * drm_atomic_helper_unprepare_planes - release plane resources on aborts
 * @dev: DRM device
 * @state: atomic state object with old state structures
 *
 * This function cleans up plane state, specifically framebuffers, from the
 * atomic state. It undoes the effects of drm_atomic_helper_prepare_planes()
 * when aborting an atomic commit. For cleaning up after a successful commit
 * use drm_atomic_helper_cleanup_planes().
 */
void drm_atomic_helper_unprepare_planes(struct drm_device *dev,
					struct drm_atomic_state *state)
{
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	int i;

	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
		const struct drm_plane_helper_funcs *funcs = plane->helper_private;

		if (funcs->end_fb_access)
			funcs->end_fb_access(plane, new_plane_state);
	}

	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
		const struct drm_plane_helper_funcs *funcs = plane->helper_private;

		if (funcs->cleanup_fb)
			funcs->cleanup_fb(plane, new_plane_state);
	}
}
EXPORT_SYMBOL(drm_atomic_helper_unprepare_planes);

static bool plane_crtc_active(const struct drm_plane_state *state)
{
	return state->crtc && state->crtc->state->active;
}

/**
 * drm_atomic_helper_commit_planes - commit plane state
 * @dev: DRM device
 * @state: atomic state object being committed
 * @flags: flags for committing plane state
 *
 * This function commits the new plane state using the plane and atomic helper
 * functions for planes and CRTCs. It assumes that the atomic state has already
 * been pushed into the relevant object state pointers, since this step can no
 * longer fail.
 *
 * It still requires the global state object @state to know which planes and
 * crtcs need to be updated though.
 *
 * Note that this function does all plane updates across all CRTCs in one step.
 * If the hardware can't support this approach look at
 * drm_atomic_helper_commit_planes_on_crtc() instead.
 *
 * Plane parameters can be updated by applications while the associated CRTC is
 * disabled. The DRM/KMS core will store the parameters in the plane state,
 * which will be available to the driver when the CRTC is turned on. As a result
 * most drivers don't need to be immediately notified of plane updates for a
 * disabled CRTC.
 *
 * Unless otherwise needed, drivers are advised to set the ACTIVE_ONLY flag in
 * @flags in order not to receive plane update notifications related to a
 * disabled CRTC. This avoids the need to manually ignore plane updates in
 * driver code when the driver and/or hardware can't or just don't need to deal
 * with updates on disabled CRTCs, for example when supporting runtime PM.
 *
 * Drivers may set the NO_DISABLE_AFTER_MODESET flag in @flags if the relevant
 * display controllers require disabling a CRTC's planes when the CRTC is
 * disabled. This function would skip the &drm_plane_helper_funcs.atomic_disable
 * call for a plane if the CRTC of the old plane state needs a modesetting
 * operation. Of course, the drivers need to disable the planes in their CRTC
 * disable callbacks since no one else would do that.
 *
 * The drm_atomic_helper_commit() default implementation doesn't set the
 * ACTIVE_ONLY flag to most closely match the behaviour of the legacy helpers.
 * This should not be copied blindly by drivers.
 */
void drm_atomic_helper_commit_planes(struct drm_device *dev,
				     struct drm_atomic_state *state,
				     uint32_t flags)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	int i;
	bool active_only = flags & DRM_PLANE_COMMIT_ACTIVE_ONLY;
	bool no_disable = flags & DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		const struct drm_crtc_helper_funcs *funcs;

		funcs = crtc->helper_private;

		if (!funcs || !funcs->atomic_begin)
			continue;

		if (active_only && !new_crtc_state->active)
			continue;

		funcs->atomic_begin(crtc, state);
	}

	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
		const struct drm_plane_helper_funcs *funcs;
		bool disabling;

		funcs = plane->helper_private;

		if (!funcs)
			continue;

		disabling = drm_atomic_plane_disabling(old_plane_state,
						       new_plane_state);

		if (active_only) {
			/*
			 * Skip planes related to inactive CRTCs. If the plane
			 * is enabled use the state of the current CRTC. If the
			 * plane is being disabled use the state of the old
			 * CRTC to avoid skipping planes being disabled on an
			 * active CRTC.
			 */
			if (!disabling && !plane_crtc_active(new_plane_state))
				continue;
			if (disabling && !plane_crtc_active(old_plane_state))
				continue;
		}

		/*
		 * Special-case disabling the plane if drivers support it.
		 */
		if (disabling && funcs->atomic_disable) {
			struct drm_crtc_state *crtc_state;

			crtc_state = old_plane_state->crtc->state;

			if (drm_atomic_crtc_needs_modeset(crtc_state) &&
			    no_disable)
				continue;

			funcs->atomic_disable(plane, state);
		} else if (new_plane_state->crtc || disabling) {
			funcs->atomic_update(plane, state);

			if (!disabling && funcs->atomic_enable) {
				if (drm_atomic_plane_enabling(old_plane_state, new_plane_state))
					funcs->atomic_enable(plane, state);
			}
		}
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		const struct drm_crtc_helper_funcs *funcs;

		funcs = crtc->helper_private;

		if (!funcs || !funcs->atomic_flush)
			continue;

		if (active_only && !new_crtc_state->active)
			continue;

		funcs->atomic_flush(crtc, state);
	}

	/*
	 * Signal end of framebuffer access here before hw_done. After hw_done,
	 * a later commit might have already released the plane state.
	 */
	for_each_old_plane_in_state(state, plane, old_plane_state, i) {
		const struct drm_plane_helper_funcs *funcs = plane->helper_private;

		if (funcs->end_fb_access)
			funcs->end_fb_access(plane, old_plane_state);
	}
}
EXPORT_SYMBOL(drm_atomic_helper_commit_planes);

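/*
 * Example (sketch): the atomic_begin()/atomic_flush() CRTC hooks bracket the
 * per-plane updates made by this function. On hardware with double-buffered
 * registers a driver might latch them like below; all foo_* names and the
 * latching scheme are hypothetical:
 *
 *	static void foo_crtc_atomic_begin(struct drm_crtc *crtc,
 *					  struct drm_atomic_state *state)
 *	{
 *		// e.g. hold double-buffered register updates
 *	}
 *
 *	static void foo_crtc_atomic_flush(struct drm_crtc *crtc,
 *					  struct drm_atomic_state *state)
 *	{
 *		// e.g. release the latch and arm the page-flip event
 *	}
 */
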
/**
 * drm_atomic_helper_commit_planes_on_crtc - commit plane state for a CRTC
 * @old_crtc_state: atomic state object with the old CRTC state
 *
 * This function commits the new plane state using the plane and atomic helper
 * functions for planes on the specific CRTC. It assumes that the atomic state
 * has already been pushed into the relevant object state pointers, since this
 * step can no longer fail.
 *
 * This function is useful when plane updates should be done CRTC-by-CRTC
 * instead of one global step like drm_atomic_helper_commit_planes() does.
 *
 * This function can only be safely used when planes are not allowed to move
 * between different CRTCs because this function doesn't handle inter-CRTC
 * dependencies. Callers need to ensure that either no such dependencies exist,
 * or resolve them through ordering of commit calls or through some other means.
 */
void
drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state)
{
	const struct drm_crtc_helper_funcs *crtc_funcs;
	struct drm_crtc *crtc = old_crtc_state->crtc;
	struct drm_atomic_state *old_state = old_crtc_state->state;
	struct drm_crtc_state *new_crtc_state =
		drm_atomic_get_new_crtc_state(old_state, crtc);
	struct drm_plane *plane;
	unsigned int plane_mask;

	plane_mask = old_crtc_state->plane_mask;
	plane_mask |= new_crtc_state->plane_mask;

	crtc_funcs = crtc->helper_private;
	if (crtc_funcs && crtc_funcs->atomic_begin)
		crtc_funcs->atomic_begin(crtc, old_state);

	drm_for_each_plane_mask(plane, crtc->dev, plane_mask) {
		struct drm_plane_state *old_plane_state =
			drm_atomic_get_old_plane_state(old_state, plane);
		struct drm_plane_state *new_plane_state =
			drm_atomic_get_new_plane_state(old_state, plane);
		const struct drm_plane_helper_funcs *plane_funcs;
		bool disabling;

		plane_funcs = plane->helper_private;

		if (!old_plane_state || !plane_funcs)
			continue;

		WARN_ON(new_plane_state->crtc &&
			new_plane_state->crtc != crtc);

		disabling = drm_atomic_plane_disabling(old_plane_state, new_plane_state);

		if (disabling && plane_funcs->atomic_disable) {
			plane_funcs->atomic_disable(plane, old_state);
		} else if (new_plane_state->crtc || disabling) {
			plane_funcs->atomic_update(plane, old_state);

			if (!disabling && plane_funcs->atomic_enable) {
				if (drm_atomic_plane_enabling(old_plane_state, new_plane_state))
					plane_funcs->atomic_enable(plane, old_state);
			}
		}
	}

	if (crtc_funcs && crtc_funcs->atomic_flush)
		crtc_funcs->atomic_flush(crtc, old_state);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_planes_on_crtc);

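/*
 * Example (sketch): a driver committing planes CRTC-by-CRTC from its own
 * commit tail might simply loop over the CRTCs in the commit:
 *
 *	for_each_old_crtc_in_state(state, crtc, old_crtc_state, i)
 *		drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
 *
 * subject to the inter-CRTC dependency caveat documented above.
 */
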
/**
 * drm_atomic_helper_disable_planes_on_crtc - helper to disable CRTC's planes
 * @old_crtc_state: atomic state object with the old CRTC state
 * @atomic: if set, synchronize with CRTC's atomic_begin/flush hooks
 *
 * Disables all planes associated with the given CRTC. This can be
 * used for instance in the CRTC helper atomic_disable callback to disable
 * all planes.
 *
 * If the @atomic parameter is set the function calls the CRTC's
 * atomic_begin hook before and atomic_flush hook after disabling the
 * planes.
 *
 * It is a bug to call this function without having implemented the
 * &drm_plane_helper_funcs.atomic_disable plane hook.
 */
void
drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc_state *old_crtc_state,
					 bool atomic)
{
	struct drm_crtc *crtc = old_crtc_state->crtc;
	const struct drm_crtc_helper_funcs *crtc_funcs =
		crtc->helper_private;
	struct drm_plane *plane;

	if (atomic && crtc_funcs && crtc_funcs->atomic_begin)
		crtc_funcs->atomic_begin(crtc, NULL);

	drm_atomic_crtc_state_for_each_plane(plane, old_crtc_state) {
		const struct drm_plane_helper_funcs *plane_funcs =
			plane->helper_private;

		if (!plane_funcs)
			continue;

		WARN_ON(!plane_funcs->atomic_disable);
		if (plane_funcs->atomic_disable)
			plane_funcs->atomic_disable(plane, NULL);
	}

	if (atomic && crtc_funcs && crtc_funcs->atomic_flush)
		crtc_funcs->atomic_flush(crtc, NULL);
}
EXPORT_SYMBOL(drm_atomic_helper_disable_planes_on_crtc);

/**
 * drm_atomic_helper_cleanup_planes - cleanup plane resources after commit
 * @dev: DRM device
 * @state: atomic state object being committed
 *
 * This function cleans up plane state, specifically framebuffers, from the old
 * configuration. Hence the old configuration must be preserved in @state to
 * be able to call this function.
 *
 * This function may not be called on the new state when the atomic update
 * fails at any point after calling drm_atomic_helper_prepare_planes(). Use
 * drm_atomic_helper_unprepare_planes() in this case.
 */
void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
				      struct drm_atomic_state *state)
{
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state;
	int i;

	for_each_old_plane_in_state(state, plane, old_plane_state, i) {
		const struct drm_plane_helper_funcs *funcs = plane->helper_private;

		if (funcs->cleanup_fb)
			funcs->cleanup_fb(plane, old_plane_state);
	}
}
EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);

/**
 * drm_atomic_helper_swap_state - store atomic state into current sw state
 * @state: atomic state
 * @stall: stall for preceding commits
 *
 * This function stores the atomic state into the current state pointers in all
 * driver objects. It should be called after all steps which might fail have
 * been done and have succeeded, but before the actual hardware state is
 * committed.
 *
 * For cleanup and error recovery the current state for all changed objects will
 * be swapped into @state.
 *
 * With that sequence it fits perfectly into the plane prepare/cleanup sequence:
 *
 * 1. Call drm_atomic_helper_prepare_planes() with the staged atomic state.
 *
 * 2. Do any other steps that might fail.
 *
 * 3. Put the staged state into the current state pointers with this function.
 *
 * 4. Actually commit the hardware state.
 *
 * 5. Call drm_atomic_helper_cleanup_planes() with @state, which since step 3
 * contains the old state. Also do any other cleanup required with that state.
 *
 * @stall must be set when nonblocking commits for this driver directly access
 * the &drm_plane.state, &drm_crtc.state or &drm_connector.state pointer. With
 * the current atomic helpers this is almost always the case, since the helpers
 * don't pass the right state structures to the callbacks.
 *
 * Returns:
 * Returns 0 on success. Can return -ERESTARTSYS when @stall is true and
 * waiting for the previous commits has been interrupted.
 */
int drm_atomic_helper_swap_state(struct drm_atomic_state *state,
				 bool stall)
{
	int i, ret;
	unsigned long flags = 0;
	struct drm_connector *connector;
	struct drm_connector_state *old_conn_state, *new_conn_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct drm_crtc_commit *commit;
	struct drm_private_obj *obj;
	struct drm_private_state *old_obj_state, *new_obj_state;

	if (stall) {
		/*
		 * We have to stall for hw_done here before
		 * drm_atomic_helper_wait_for_dependencies() because flip
		 * depth > 1 is not yet supported by all drivers. As long as
		 * obj->state is directly dereferenced anywhere in the drivers
		 * atomic_commit_tail function, then it's unsafe to swap state
		 * before drm_atomic_helper_commit_hw_done() is called.
		 */

		for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
			commit = old_crtc_state->commit;

			if (!commit)
				continue;

			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (ret)
				return ret;
		}

		for_each_old_connector_in_state(state, connector, old_conn_state, i) {
			commit = old_conn_state->commit;

			if (!commit)
				continue;

			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (ret)
				return ret;
		}

		for_each_old_plane_in_state(state, plane, old_plane_state, i) {
			commit = old_plane_state->commit;

			if (!commit)
				continue;

			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (ret)
				return ret;
		}
	}

	for_each_oldnew_connector_in_state(state, connector, old_conn_state, new_conn_state, i) {
		WARN_ON(connector->state != old_conn_state);

		old_conn_state->state = state;
		new_conn_state->state = NULL;

		state->connectors[i].state = old_conn_state;
		connector->state = new_conn_state;
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		WARN_ON(crtc->state != old_crtc_state);

		old_crtc_state->state = state;
		new_crtc_state->state = NULL;

		state->crtcs[i].state = old_crtc_state;
		crtc->state = new_crtc_state;

		if (new_crtc_state->commit) {
			spin_lock(&crtc->commit_lock);
			list_add(&new_crtc_state->commit->commit_entry,
				 &crtc->commit_list);
			spin_unlock(&crtc->commit_lock);

			new_crtc_state->commit->event = NULL;
		}
	}

	drm_panic_lock(state->dev, flags);
	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
		WARN_ON(plane->state != old_plane_state);

		old_plane_state->state = state;
		new_plane_state->state = NULL;

		state->planes[i].state = old_plane_state;
		plane->state = new_plane_state;
	}
	drm_panic_unlock(state->dev, flags);

	for_each_oldnew_private_obj_in_state(state, obj, old_obj_state, new_obj_state, i) {
		WARN_ON(obj->state != old_obj_state);

		old_obj_state->state = state;
		new_obj_state->state = NULL;

		state->private_objs[i].state = old_obj_state;
		obj->state = new_obj_state;
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_helper_swap_state);

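/*
 * Example (sketch): the five steps from the kerneldoc above, spelled out for a
 * simple blocking commit path. Error unwinding is elided and foo_program_hw()
 * is a hypothetical driver function:
 *
 *	ret = drm_atomic_helper_prepare_planes(dev, state);	// step 1
 *	if (ret)
 *		return ret;
 *	// ... any other steps that might fail ...		// step 2
 *	ret = drm_atomic_helper_swap_state(state, true);	// step 3
 *	if (ret) {
 *		drm_atomic_helper_unprepare_planes(dev, state);
 *		return ret;
 *	}
 *	foo_program_hw(state);					// step 4
 *	drm_atomic_helper_cleanup_planes(dev, state);		// step 5
 */
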
/**
 * drm_atomic_helper_update_plane - Helper for primary plane update using atomic
 * @plane: plane object to update
 * @crtc: CRTC the plane will be displayed on
 * @fb: framebuffer to flip onto plane
 * @crtc_x: x offset of primary plane on @crtc
 * @crtc_y: y offset of primary plane on @crtc
 * @crtc_w: width of primary plane rectangle on @crtc
 * @crtc_h: height of primary plane rectangle on @crtc
 * @src_x: x offset of @fb for panning
 * @src_y: y offset of @fb for panning
 * @src_w: width of source rectangle in @fb
 * @src_h: height of source rectangle in @fb
 * @ctx: lock acquire context
 *
 * Provides a default plane update handler using the atomic driver interface.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
int drm_atomic_helper_update_plane(struct drm_plane *plane,
				   struct drm_crtc *crtc,
				   struct drm_framebuffer *fb,
				   int crtc_x, int crtc_y,
				   unsigned int crtc_w, unsigned int crtc_h,
				   uint32_t src_x, uint32_t src_y,
				   uint32_t src_w, uint32_t src_h,
				   struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_atomic_state *state;
	struct drm_plane_state *plane_state;
	int ret = 0;

	state = drm_atomic_state_alloc(plane->dev);
	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ctx;
	plane_state = drm_atomic_get_plane_state(state, plane);
	if (IS_ERR(plane_state)) {
		ret = PTR_ERR(plane_state);
		goto fail;
	}

	ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
	if (ret != 0)
		goto fail;
	drm_atomic_set_fb_for_plane(plane_state, fb);
	plane_state->crtc_x = crtc_x;
	plane_state->crtc_y = crtc_y;
	plane_state->crtc_w = crtc_w;
	plane_state->crtc_h = crtc_h;
	plane_state->src_x = src_x;
	plane_state->src_y = src_y;
	plane_state->src_w = src_w;
	plane_state->src_h = src_h;

	if (plane == crtc->cursor)
		state->legacy_cursor_update = true;

	ret = drm_atomic_commit(state);
fail:
	drm_atomic_state_put(state);
	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_update_plane);

/**
3356
* drm_atomic_helper_disable_plane - Helper for primary plane disable using atomic
3357
* @plane: plane to disable
3358
* @ctx: lock acquire context
3359
*
3360
* Provides a default plane disable handler using the atomic driver interface.
3361
*
3362
* RETURNS:
3363
* Zero on success, error code on failure
3364
*/
3365
int drm_atomic_helper_disable_plane(struct drm_plane *plane,
3366
struct drm_modeset_acquire_ctx *ctx)
3367
{
3368
struct drm_atomic_state *state;
3369
struct drm_plane_state *plane_state;
3370
int ret = 0;
3371
3372
state = drm_atomic_state_alloc(plane->dev);
3373
if (!state)
3374
return -ENOMEM;
3375
3376
state->acquire_ctx = ctx;
3377
plane_state = drm_atomic_get_plane_state(state, plane);
3378
if (IS_ERR(plane_state)) {
3379
ret = PTR_ERR(plane_state);
3380
goto fail;
3381
}
3382
3383
if (plane_state->crtc && plane_state->crtc->cursor == plane)
3384
plane_state->state->legacy_cursor_update = true;
3385
3386
ret = __drm_atomic_helper_disable_plane(plane, plane_state);
3387
if (ret != 0)
3388
goto fail;
3389
3390
ret = drm_atomic_commit(state);
3391
fail:
3392
drm_atomic_state_put(state);
3393
return ret;
3394
}
3395
EXPORT_SYMBOL(drm_atomic_helper_disable_plane);
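/*
 * Example (illustration only): a minimal sketch of how a driver typically
 * wires the two helpers above into its plane function table. The
 * foo_plane_funcs name is hypothetical; all referenced callbacks are the
 * standard helpers from the atomic helper and atomic state helper libraries.
 */
static const struct drm_plane_funcs foo_plane_funcs = {
	.update_plane		= drm_atomic_helper_update_plane,
	.disable_plane		= drm_atomic_helper_disable_plane,
	.destroy		= drm_plane_cleanup,
	.reset			= drm_atomic_helper_plane_reset,
	.atomic_duplicate_state	= drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
};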
3396
3397
/**
3398
* drm_atomic_helper_set_config - set a new config from userspace
3399
* @set: mode set configuration
3400
* @ctx: lock acquisition context
3401
*
3402
* Provides a default CRTC set_config handler using the atomic driver interface.
3403
*
3404
* NOTE: For backwards compatibility with old userspace this automatically
3405
* resets the "link-status" property to GOOD, to force any link
3406
* re-training. The SETCRTC ioctl does not define whether an update
3407
* needs a full modeset or just a plane update, hence we're allowed to do
3408
* that. See also drm_connector_set_link_status_property().
3409
*
3410
* Returns:
3411
* Returns 0 on success, negative errno numbers on failure.
3412
*/
3413
int drm_atomic_helper_set_config(struct drm_mode_set *set,
3414
struct drm_modeset_acquire_ctx *ctx)
3415
{
3416
struct drm_atomic_state *state;
3417
struct drm_crtc *crtc = set->crtc;
3418
int ret = 0;
3419
3420
state = drm_atomic_state_alloc(crtc->dev);
3421
if (!state)
3422
return -ENOMEM;
3423
3424
state->acquire_ctx = ctx;
3425
ret = __drm_atomic_helper_set_config(set, state);
3426
if (ret != 0)
3427
goto fail;
3428
3429
ret = handle_conflicting_encoders(state, true);
3430
if (ret)
3431
goto fail;
3432
3433
ret = drm_atomic_commit(state);
3434
3435
fail:
3436
drm_atomic_state_put(state);
3437
return ret;
3438
}
3439
EXPORT_SYMBOL(drm_atomic_helper_set_config);
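/*
 * Example (illustration only): a minimal sketch of a CRTC function table
 * using the legacy entry points implemented by this library, including
 * drm_atomic_helper_page_flip() defined further below. foo_crtc_funcs is a
 * hypothetical name.
 */
static const struct drm_crtc_funcs foo_crtc_funcs = {
	.set_config		= drm_atomic_helper_set_config,
	.page_flip		= drm_atomic_helper_page_flip,
	.destroy		= drm_crtc_cleanup,
	.reset			= drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state	= drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_crtc_destroy_state,
};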
3440
3441
/**
3442
* drm_atomic_helper_disable_all - disable all currently active outputs
3443
* @dev: DRM device
3444
* @ctx: lock acquisition context
3445
*
3446
* Loops through all connectors, finding those that aren't turned off and
3447
* turning them off by setting their DPMS mode to OFF and deactivating the CRTC
3448
* that they are connected to.
3449
*
3450
* This is used for example in suspend/resume to disable all currently active
3451
* outputs when suspending. If you just want to shut down everything at e.g.
3452
* driver unload, look at drm_atomic_helper_shutdown().
3453
*
3454
* Note that if callers haven't already acquired all modeset locks this might
3455
* return -EDEADLK, which must be handled by calling drm_modeset_backoff().
3456
*
3457
* Returns:
3458
* 0 on success or a negative error code on failure.
3459
*
3460
* See also:
3461
* drm_atomic_helper_suspend(), drm_atomic_helper_resume() and
3462
* drm_atomic_helper_shutdown().
3463
*/
3464
int drm_atomic_helper_disable_all(struct drm_device *dev,
3465
struct drm_modeset_acquire_ctx *ctx)
3466
{
3467
struct drm_atomic_state *state;
3468
struct drm_connector_state *conn_state;
3469
struct drm_connector *conn;
3470
struct drm_plane_state *plane_state;
3471
struct drm_plane *plane;
3472
struct drm_crtc_state *crtc_state;
3473
struct drm_crtc *crtc;
3474
int ret, i;
3475
3476
state = drm_atomic_state_alloc(dev);
3477
if (!state)
3478
return -ENOMEM;
3479
3480
state->acquire_ctx = ctx;
3481
3482
drm_for_each_crtc(crtc, dev) {
3483
crtc_state = drm_atomic_get_crtc_state(state, crtc);
3484
if (IS_ERR(crtc_state)) {
3485
ret = PTR_ERR(crtc_state);
3486
goto free;
3487
}
3488
3489
crtc_state->active = false;
3490
3491
ret = drm_atomic_set_mode_prop_for_crtc(crtc_state, NULL);
3492
if (ret < 0)
3493
goto free;
3494
3495
ret = drm_atomic_add_affected_planes(state, crtc);
3496
if (ret < 0)
3497
goto free;
3498
3499
ret = drm_atomic_add_affected_connectors(state, crtc);
3500
if (ret < 0)
3501
goto free;
3502
}
3503
3504
for_each_new_connector_in_state(state, conn, conn_state, i) {
3505
ret = drm_atomic_set_crtc_for_connector(conn_state, NULL);
3506
if (ret < 0)
3507
goto free;
3508
}
3509
3510
for_each_new_plane_in_state(state, plane, plane_state, i) {
3511
ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
3512
if (ret < 0)
3513
goto free;
3514
3515
drm_atomic_set_fb_for_plane(plane_state, NULL);
3516
}
3517
3518
ret = drm_atomic_commit(state);
3519
free:
3520
drm_atomic_state_put(state);
3521
return ret;
3522
}
3523
EXPORT_SYMBOL(drm_atomic_helper_disable_all);
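/*
 * Example (illustration only): a sketch of calling the helper above with the
 * explicit -EDEADLK handling the kerneldoc asks for when the caller has not
 * already acquired all modeset locks. The foo_disable_everything() wrapper is
 * an assumption; drivers can equally use DRM_MODESET_LOCK_ALL_BEGIN/END as
 * drm_atomic_helper_shutdown() does below.
 */
static void foo_disable_everything(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_atomic_helper_disable_all(dev, &ctx);
	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	}
	if (ret)
		drm_warn(dev, "disabling outputs failed: %d\n", ret);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}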
3524
3525
/**
3526
* drm_atomic_helper_reset_crtc - reset the active outputs of a CRTC
3527
* @crtc: DRM CRTC
3528
* @ctx: lock acquisition context
3529
*
3530
* Reset the active outputs by indicating that connectors have changed.
3531
* This implies a reset of all active components in the pipeline between the
3532
* CRTC and its connectors.
3533
*
3534
* A bridge-oriented variant of this function exists as
3535
* drm_bridge_helper_reset_crtc().
3536
*
3537
* NOTE: This relies on resetting &drm_crtc_state.connectors_changed.
3538
* For drivers which optimize out unnecessary modesets this will result in
3539
* a no-op commit, achieving nothing.
3540
*
3541
* Returns:
3542
* 0 on success or a negative error code on failure.
3543
*/
3544
int drm_atomic_helper_reset_crtc(struct drm_crtc *crtc,
3545
struct drm_modeset_acquire_ctx *ctx)
3546
{
3547
struct drm_atomic_state *state;
3548
struct drm_crtc_state *crtc_state;
3549
int ret;
3550
3551
state = drm_atomic_state_alloc(crtc->dev);
3552
if (!state)
3553
return -ENOMEM;
3554
3555
state->acquire_ctx = ctx;
3556
3557
crtc_state = drm_atomic_get_crtc_state(state, crtc);
3558
if (IS_ERR(crtc_state)) {
3559
ret = PTR_ERR(crtc_state);
3560
goto out;
3561
}
3562
3563
crtc_state->connectors_changed = true;
3564
3565
ret = drm_atomic_commit(state);
3566
out:
3567
drm_atomic_state_put(state);
3568
3569
return ret;
3570
}
3571
EXPORT_SYMBOL(drm_atomic_helper_reset_crtc);
3572
3573
/**
3574
* drm_atomic_helper_shutdown - shut down all CRTCs
3575
* @dev: DRM device
3576
*
3577
* This shuts down all CRTCs, which is useful for driver unloading. Shutdown on
3578
* suspend should instead be handled with drm_atomic_helper_suspend(), since
3579
* that also takes a snapshot of the modeset state to be restored on resume.
3580
*
3581
* This is just a convenience wrapper around drm_atomic_helper_disable_all(),
3582
* and it is the atomic version of drm_helper_force_disable_all().
3583
*/
3584
void drm_atomic_helper_shutdown(struct drm_device *dev)
3585
{
3586
struct drm_modeset_acquire_ctx ctx;
3587
int ret;
3588
3589
if (dev == NULL)
3590
return;
3591
3592
DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
3593
3594
ret = drm_atomic_helper_disable_all(dev, &ctx);
3595
if (ret)
3596
drm_err(dev,
3597
"Disabling all crtc's during unload failed with %i\n",
3598
ret);
3599
3600
DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
3601
}
3602
EXPORT_SYMBOL(drm_atomic_helper_shutdown);
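/*
 * Example (illustration only): drivers typically call the helper above from
 * their remove and shutdown callbacks so the hardware is quiesced before the
 * device goes away. The foo_* name, the platform bus and the assumption that
 * drvdata points at the drm_device are all hypothetical.
 */
static void foo_platform_shutdown(struct platform_device *pdev)
{
	struct drm_device *drm = platform_get_drvdata(pdev);

	drm_atomic_helper_shutdown(drm);
}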
3603
3604
/**
3605
* drm_atomic_helper_duplicate_state - duplicate an atomic state object
3606
* @dev: DRM device
3607
* @ctx: lock acquisition context
3608
*
3609
* Makes a copy of the current atomic state by looping over all objects and
3610
* duplicating their respective states. This is used for example by suspend/
3611
* resume support code to save the state prior to suspend such that it can
3612
* be restored upon resume.
3613
*
3614
* Note that this treats atomic state as persistent between save and restore.
3615
* Drivers must make sure that this is possible and won't result in confusion
3616
* or erroneous behaviour.
3617
*
3618
* Note that if callers haven't already acquired all modeset locks this might
3619
* return -EDEADLK, which must be handled by calling drm_modeset_backoff().
3620
*
3621
* Returns:
3622
* A pointer to the copy of the atomic state object on success or an
3623
* ERR_PTR()-encoded error code on failure.
3624
*
3625
* See also:
3626
* drm_atomic_helper_suspend(), drm_atomic_helper_resume()
3627
*/
3628
struct drm_atomic_state *
3629
drm_atomic_helper_duplicate_state(struct drm_device *dev,
3630
struct drm_modeset_acquire_ctx *ctx)
3631
{
3632
struct drm_atomic_state *state;
3633
struct drm_connector *conn;
3634
struct drm_connector_list_iter conn_iter;
3635
struct drm_plane *plane;
3636
struct drm_crtc *crtc;
3637
int err = 0;
3638
3639
state = drm_atomic_state_alloc(dev);
3640
if (!state)
3641
return ERR_PTR(-ENOMEM);
3642
3643
state->acquire_ctx = ctx;
3644
state->duplicated = true;
3645
3646
drm_for_each_crtc(crtc, dev) {
3647
struct drm_crtc_state *crtc_state;
3648
3649
crtc_state = drm_atomic_get_crtc_state(state, crtc);
3650
if (IS_ERR(crtc_state)) {
3651
err = PTR_ERR(crtc_state);
3652
goto free;
3653
}
3654
}
3655
3656
drm_for_each_plane(plane, dev) {
3657
struct drm_plane_state *plane_state;
3658
3659
plane_state = drm_atomic_get_plane_state(state, plane);
3660
if (IS_ERR(plane_state)) {
3661
err = PTR_ERR(plane_state);
3662
goto free;
3663
}
3664
}
3665
3666
drm_connector_list_iter_begin(dev, &conn_iter);
3667
drm_for_each_connector_iter(conn, &conn_iter) {
3668
struct drm_connector_state *conn_state;
3669
3670
conn_state = drm_atomic_get_connector_state(state, conn);
3671
if (IS_ERR(conn_state)) {
3672
err = PTR_ERR(conn_state);
3673
drm_connector_list_iter_end(&conn_iter);
3674
goto free;
3675
}
3676
}
3677
drm_connector_list_iter_end(&conn_iter);
3678
3679
/* clear the acquire context so that it isn't accidentally reused */
3680
state->acquire_ctx = NULL;
3681
3682
free:
3683
if (err < 0) {
3684
drm_atomic_state_put(state);
3685
state = ERR_PTR(err);
3686
}
3687
3688
return state;
3689
}
3690
EXPORT_SYMBOL(drm_atomic_helper_duplicate_state);
3691
3692
/**
3693
* drm_atomic_helper_suspend - subsystem-level suspend helper
3694
* @dev: DRM device
3695
*
3696
* Duplicates the current atomic state, disables all active outputs and then
3697
* returns a pointer to a copy of the original atomic state to the caller. Drivers can
3698
* pass this pointer to the drm_atomic_helper_resume() helper upon resume to
3699
* restore the output configuration that was active at the time the system
3700
* entered suspend.
3701
*
3702
* Note that it is potentially unsafe to use this. The atomic state object
3703
* returned by this function is assumed to be persistent. Drivers must ensure
3704
* that this holds true. Before calling this function, drivers must make sure
3705
* to suspend fbdev emulation so that nothing can be using the device.
3706
*
3707
* Returns:
3708
* A pointer to a copy of the state before suspend on success or an ERR_PTR()-
3709
* encoded error code on failure. Drivers should store the returned atomic
3710
* state object and pass it to the drm_atomic_helper_resume() helper upon
3711
* resume.
3712
*
3713
* See also:
3714
* drm_atomic_helper_duplicate_state(), drm_atomic_helper_disable_all(),
3715
* drm_atomic_helper_resume(), drm_atomic_helper_commit_duplicated_state()
3716
*/
3717
struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev)
3718
{
3719
struct drm_modeset_acquire_ctx ctx;
3720
struct drm_atomic_state *state;
3721
int err;
3722
3723
/* This can never be returned, but it makes the compiler happy */
3724
state = ERR_PTR(-EINVAL);
3725
3726
DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, err);
3727
3728
state = drm_atomic_helper_duplicate_state(dev, &ctx);
3729
if (IS_ERR(state))
3730
goto unlock;
3731
3732
err = drm_atomic_helper_disable_all(dev, &ctx);
3733
if (err < 0) {
3734
drm_atomic_state_put(state);
3735
state = ERR_PTR(err);
3736
goto unlock;
3737
}
3738
3739
unlock:
3740
DRM_MODESET_LOCK_ALL_END(dev, ctx, err);
3741
if (err)
3742
return ERR_PTR(err);
3743
3744
return state;
3745
}
3746
EXPORT_SYMBOL(drm_atomic_helper_suspend);
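/*
 * Example (illustration only): a system-sleep suspend hook built on the
 * helper above, stashing the duplicated state for drm_atomic_helper_resume()
 * (see the matching resume sketch further below). struct foo_device and the
 * dev_get_drvdata() layout are assumptions; fbdev emulation is assumed to be
 * suspended already, and many drivers simply use the
 * drm_mode_config_helper_suspend()/resume() wrappers instead.
 */
struct foo_device {
	struct drm_device drm;
	struct drm_atomic_state *suspend_state;
};

static int foo_pm_suspend(struct device *dev)
{
	struct foo_device *foo = dev_get_drvdata(dev);

	foo->suspend_state = drm_atomic_helper_suspend(&foo->drm);
	if (IS_ERR(foo->suspend_state))
		return PTR_ERR(foo->suspend_state);

	return 0;
}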
3747
3748
/**
3749
* drm_atomic_helper_commit_duplicated_state - commit duplicated state
3750
* @state: duplicated atomic state to commit
3751
* @ctx: pointer to acquire_ctx to use for commit.
3752
*
3753
* The state returned by drm_atomic_helper_duplicate_state() and
3754
* drm_atomic_helper_suspend() is partially invalid, and needs to
3755
* be fixed up before commit.
3756
*
3757
* Returns:
3758
* 0 on success or a negative error code on failure.
3759
*
3760
* See also:
3761
* drm_atomic_helper_suspend()
3762
*/
3763
int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
3764
struct drm_modeset_acquire_ctx *ctx)
3765
{
3766
int i, ret;
3767
struct drm_plane *plane;
3768
struct drm_plane_state *new_plane_state;
3769
struct drm_connector *connector;
3770
struct drm_connector_state *new_conn_state;
3771
struct drm_crtc *crtc;
3772
struct drm_crtc_state *new_crtc_state;
3773
3774
state->acquire_ctx = ctx;
3775
3776
for_each_new_plane_in_state(state, plane, new_plane_state, i)
3777
state->planes[i].old_state = plane->state;
3778
3779
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
3780
state->crtcs[i].old_state = crtc->state;
3781
3782
for_each_new_connector_in_state(state, connector, new_conn_state, i)
3783
state->connectors[i].old_state = connector->state;
3784
3785
ret = drm_atomic_commit(state);
3786
3787
state->acquire_ctx = NULL;
3788
3789
return ret;
3790
}
3791
EXPORT_SYMBOL(drm_atomic_helper_commit_duplicated_state);
3792
3793
/**
3794
* drm_atomic_helper_resume - subsystem-level resume helper
3795
* @dev: DRM device
3796
* @state: atomic state to resume to
3797
*
3798
* Calls drm_mode_config_reset() to synchronize hardware and software states,
3799
* grabs all modeset locks and commits the atomic state object. This can be
3800
* used in conjunction with the drm_atomic_helper_suspend() helper to
3801
* implement suspend/resume for drivers that support atomic mode-setting.
3802
*
3803
* Returns:
3804
* 0 on success or a negative error code on failure.
3805
*
3806
* See also:
3807
* drm_atomic_helper_suspend()
3808
*/
3809
int drm_atomic_helper_resume(struct drm_device *dev,
3810
struct drm_atomic_state *state)
3811
{
3812
struct drm_modeset_acquire_ctx ctx;
3813
int err;
3814
3815
drm_mode_config_reset(dev);
3816
3817
DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, err);
3818
3819
err = drm_atomic_helper_commit_duplicated_state(state, &ctx);
3820
3821
DRM_MODESET_LOCK_ALL_END(dev, ctx, err);
3822
drm_atomic_state_put(state);
3823
3824
return err;
3825
}
3826
EXPORT_SYMBOL(drm_atomic_helper_resume);
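/*
 * Example (illustration only): the resume counterpart to the suspend sketch
 * above, handing the stashed state back to drm_atomic_helper_resume(), which
 * puts the state reference itself. foo_device and foo_pm_suspend() are the
 * hypothetical names used in that sketch.
 */
static int foo_pm_resume(struct device *dev)
{
	struct foo_device *foo = dev_get_drvdata(dev);

	return drm_atomic_helper_resume(&foo->drm, foo->suspend_state);
}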
3827
3828
static int page_flip_common(struct drm_atomic_state *state,
3829
struct drm_crtc *crtc,
3830
struct drm_framebuffer *fb,
3831
struct drm_pending_vblank_event *event,
3832
uint32_t flags)
3833
{
3834
struct drm_plane *plane = crtc->primary;
3835
struct drm_plane_state *plane_state;
3836
struct drm_crtc_state *crtc_state;
3837
int ret = 0;
3838
3839
crtc_state = drm_atomic_get_crtc_state(state, crtc);
3840
if (IS_ERR(crtc_state))
3841
return PTR_ERR(crtc_state);
3842
3843
crtc_state->event = event;
3844
crtc_state->async_flip = flags & DRM_MODE_PAGE_FLIP_ASYNC;
3845
3846
plane_state = drm_atomic_get_plane_state(state, plane);
3847
if (IS_ERR(plane_state))
3848
return PTR_ERR(plane_state);
3849
3850
ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
3851
if (ret != 0)
3852
return ret;
3853
drm_atomic_set_fb_for_plane(plane_state, fb);
3854
3855
/* Make sure we don't accidentally do a full modeset. */
3856
state->allow_modeset = false;
3857
if (!crtc_state->active) {
3858
drm_dbg_atomic(crtc->dev,
3859
"[CRTC:%d:%s] disabled, rejecting legacy flip\n",
3860
crtc->base.id, crtc->name);
3861
return -EINVAL;
3862
}
3863
3864
return ret;
3865
}
3866
3867
/**
3868
* drm_atomic_helper_page_flip - execute a legacy page flip
3869
* @crtc: DRM CRTC
3870
* @fb: DRM framebuffer
3871
* @event: optional DRM event to signal upon completion
3872
* @flags: flip flags for non-vblank sync'ed updates
3873
* @ctx: lock acquisition context
3874
*
3875
* Provides a default &drm_crtc_funcs.page_flip implementation
3876
* using the atomic driver interface.
3877
*
3878
* Returns:
3879
* Returns 0 on success, negative errno numbers on failure.
3880
*
3881
* See also:
3882
* drm_atomic_helper_page_flip_target()
3883
*/
3884
int drm_atomic_helper_page_flip(struct drm_crtc *crtc,
3885
struct drm_framebuffer *fb,
3886
struct drm_pending_vblank_event *event,
3887
uint32_t flags,
3888
struct drm_modeset_acquire_ctx *ctx)
3889
{
3890
struct drm_plane *plane = crtc->primary;
3891
struct drm_atomic_state *state;
3892
int ret = 0;
3893
3894
state = drm_atomic_state_alloc(plane->dev);
3895
if (!state)
3896
return -ENOMEM;
3897
3898
state->acquire_ctx = ctx;
3899
3900
ret = page_flip_common(state, crtc, fb, event, flags);
3901
if (ret != 0)
3902
goto fail;
3903
3904
ret = drm_atomic_nonblocking_commit(state);
3905
fail:
3906
drm_atomic_state_put(state);
3907
return ret;
3908
}
3909
EXPORT_SYMBOL(drm_atomic_helper_page_flip);
3910
3911
/**
3912
* drm_atomic_helper_page_flip_target - do page flip on target vblank period.
3913
* @crtc: DRM CRTC
3914
* @fb: DRM framebuffer
3915
* @event: optional DRM event to signal upon completion
3916
* @flags: flip flags for non-vblank sync'ed updates
3917
* @target: target vblank period in which the flip should take effect
3918
* @ctx: lock acquisition context
3919
*
3920
* Provides a default &drm_crtc_funcs.page_flip_target implementation.
3921
* Similar to drm_atomic_helper_page_flip(), with an extra parameter to
3922
* specify the target vblank period in which the flip should take effect.
3923
*
3924
* Returns:
3925
* Returns 0 on success, negative errno numbers on failure.
3926
*/
3927
int drm_atomic_helper_page_flip_target(struct drm_crtc *crtc,
3928
struct drm_framebuffer *fb,
3929
struct drm_pending_vblank_event *event,
3930
uint32_t flags,
3931
uint32_t target,
3932
struct drm_modeset_acquire_ctx *ctx)
3933
{
3934
struct drm_plane *plane = crtc->primary;
3935
struct drm_atomic_state *state;
3936
struct drm_crtc_state *crtc_state;
3937
int ret = 0;
3938
3939
state = drm_atomic_state_alloc(plane->dev);
3940
if (!state)
3941
return -ENOMEM;
3942
3943
state->acquire_ctx = ctx;
3944
3945
ret = page_flip_common(state, crtc, fb, event, flags);
3946
if (ret != 0)
3947
goto fail;
3948
3949
crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
3950
if (WARN_ON(!crtc_state)) {
3951
ret = -EINVAL;
3952
goto fail;
3953
}
3954
crtc_state->target_vblank = target;
3955
3956
ret = drm_atomic_nonblocking_commit(state);
3957
fail:
3958
drm_atomic_state_put(state);
3959
return ret;
3960
}
3961
EXPORT_SYMBOL(drm_atomic_helper_page_flip_target);
3962
3963
/**
3964
* drm_atomic_helper_bridge_propagate_bus_fmt() - Propagate output format to
3965
* the input end of a bridge
3966
* @bridge: bridge control structure
3967
* @bridge_state: new bridge state
3968
* @crtc_state: new CRTC state
3969
* @conn_state: new connector state
3970
* @output_fmt: tested output bus format
3971
* @num_input_fmts: will contain the size of the returned array
3972
*
3973
* This helper is a pluggable implementation of the
3974
* &drm_bridge_funcs.atomic_get_input_bus_fmts operation for bridges that don't
3975
* modify the bus configuration between their input and their output. It
3976
* returns an array of input formats with a single element set to @output_fmt.
3977
*
3978
* RETURNS:
3979
* a valid format array of size @num_input_fmts, or NULL if the allocation
3980
* failed
3981
*/
3982
u32 *
3983
drm_atomic_helper_bridge_propagate_bus_fmt(struct drm_bridge *bridge,
3984
struct drm_bridge_state *bridge_state,
3985
struct drm_crtc_state *crtc_state,
3986
struct drm_connector_state *conn_state,
3987
u32 output_fmt,
3988
unsigned int *num_input_fmts)
3989
{
3990
u32 *input_fmts;
3991
3992
input_fmts = kzalloc(sizeof(*input_fmts), GFP_KERNEL);
3993
if (!input_fmts) {
3994
*num_input_fmts = 0;
3995
return NULL;
3996
}
3997
3998
*num_input_fmts = 1;
3999
input_fmts[0] = output_fmt;
4000
return input_fmts;
4001
}
4002
EXPORT_SYMBOL(drm_atomic_helper_bridge_propagate_bus_fmt);
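/*
 * Example (illustration only): a transparent bridge can plug the helper above
 * straight into its function table alongside the generic bridge state
 * helpers. foo_bridge_funcs is a hypothetical name.
 */
static const struct drm_bridge_funcs foo_bridge_funcs = {
	.atomic_reset		= drm_atomic_helper_bridge_reset,
	.atomic_duplicate_state	= drm_atomic_helper_bridge_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_bridge_destroy_state,
	.atomic_get_input_bus_fmts = drm_atomic_helper_bridge_propagate_bus_fmt,
};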
4003
4004